11a6a4b6cSPavankumar Nandeshwar // SPDX-License-Identifier: BSD-3-Clause-Clear
21a6a4b6cSPavankumar Nandeshwar /*
31a6a4b6cSPavankumar Nandeshwar * Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
41a6a4b6cSPavankumar Nandeshwar * Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries.
51a6a4b6cSPavankumar Nandeshwar */
61a6a4b6cSPavankumar Nandeshwar
71a6a4b6cSPavankumar Nandeshwar #include "dp_rx.h"
81a6a4b6cSPavankumar Nandeshwar #include "../dp_tx.h"
96b4954d3SPavankumar Nandeshwar #include "../peer.h"
1052537339SPavankumar Nandeshwar #include "hal_qcn9274.h"
1152537339SPavankumar Nandeshwar #include "hal_wcn7850.h"
12023ace9fSBaochen Qiang #include "hal_qcc2072.h"
136b4954d3SPavankumar Nandeshwar
ath12k_wifi7_dp_rx_get_peer_id(struct ath12k_dp * dp,enum ath12k_peer_metadata_version ver,__le32 peer_metadata)14147daefcSPavankumar Nandeshwar static u16 ath12k_wifi7_dp_rx_get_peer_id(struct ath12k_dp *dp,
15147daefcSPavankumar Nandeshwar enum ath12k_peer_metadata_version ver,
16147daefcSPavankumar Nandeshwar __le32 peer_metadata)
17147daefcSPavankumar Nandeshwar {
18147daefcSPavankumar Nandeshwar switch (ver) {
19147daefcSPavankumar Nandeshwar default:
20147daefcSPavankumar Nandeshwar ath12k_warn(dp->ab, "Unknown peer metadata version: %d", ver);
21147daefcSPavankumar Nandeshwar fallthrough;
22147daefcSPavankumar Nandeshwar case ATH12K_PEER_METADATA_V0:
23147daefcSPavankumar Nandeshwar return le32_get_bits(peer_metadata,
24147daefcSPavankumar Nandeshwar RX_MPDU_DESC_META_DATA_V0_PEER_ID);
25147daefcSPavankumar Nandeshwar case ATH12K_PEER_METADATA_V1:
26147daefcSPavankumar Nandeshwar return le32_get_bits(peer_metadata,
27147daefcSPavankumar Nandeshwar RX_MPDU_DESC_META_DATA_V1_PEER_ID);
28147daefcSPavankumar Nandeshwar case ATH12K_PEER_METADATA_V1A:
29147daefcSPavankumar Nandeshwar return le32_get_bits(peer_metadata,
30147daefcSPavankumar Nandeshwar RX_MPDU_DESC_META_DATA_V1A_PEER_ID);
31147daefcSPavankumar Nandeshwar case ATH12K_PEER_METADATA_V1B:
32147daefcSPavankumar Nandeshwar return le32_get_bits(peer_metadata,
33147daefcSPavankumar Nandeshwar RX_MPDU_DESC_META_DATA_V1B_PEER_ID);
34147daefcSPavankumar Nandeshwar }
35147daefcSPavankumar Nandeshwar }
36147daefcSPavankumar Nandeshwar
/* Program the REO queue reference LUT entry for @peer_id/@tid with the
 * DMA address of the RX reorder queue descriptor, then invalidate the
 * hardware's shared queue-address cache so the new entry takes effect.
 *
 * ML peers (marked via ATH12K_PEER_ML_ID_VALID) are indexed into the
 * separate ML LUT after stripping the marker bit.
 */
void ath12k_wifi7_peer_rx_tid_qref_setup(struct ath12k_base *ab, u16 peer_id, u16 tid,
					 dma_addr_t paddr)
{
	struct ath12k_dp *dp = ath12k_ab_to_dp(ab);
	struct ath12k_reo_queue_ref *lut_base, *qref;

	if (!ab->hw_params->reoq_lut_support)
		return;

	if (peer_id & ATH12K_PEER_ML_ID_VALID) {
		peer_id &= ~ATH12K_PEER_ML_ID_VALID;
		lut_base = (struct ath12k_reo_queue_ref *)dp->ml_reoq_lut.vaddr;
	} else {
		lut_base = (struct ath12k_reo_queue_ref *)dp->reoq_lut.vaddr;
	}

	/* One entry per TID (plus NON_QOS) per peer */
	qref = lut_base + (peer_id * (IEEE80211_NUM_TIDS + 1) + tid);

	qref->info0 = u32_encode_bits(lower_32_bits(paddr),
				      BUFFER_ADDR_INFO0_ADDR);
	qref->info1 = u32_encode_bits(upper_32_bits(paddr),
				      BUFFER_ADDR_INFO1_ADDR) |
		      u32_encode_bits(tid, DP_REO_QREF_NUM);

	ath12k_hal_reo_shared_qaddr_cache_clear(ab);
}
6752c55568SPavankumar Nandeshwar
/* Clear the REO queue reference LUT entry for @peer_id/@tid: the queue
 * address fields are zeroed while the TID number remains encoded, so
 * hardware no longer dereferences the torn-down queue descriptor.
 */
void ath12k_wifi7_peer_rx_tid_qref_reset(struct ath12k_base *ab,
					 u16 peer_id, u16 tid)
{
	struct ath12k_dp *dp = ath12k_ab_to_dp(ab);
	struct ath12k_reo_queue_ref *lut_base, *qref;

	if (!ab->hw_params->reoq_lut_support)
		return;

	/* ML peers live in a separate LUT; strip the marker bit first */
	if (peer_id & ATH12K_PEER_ML_ID_VALID) {
		peer_id &= ~ATH12K_PEER_ML_ID_VALID;
		lut_base = (struct ath12k_reo_queue_ref *)dp->ml_reoq_lut.vaddr;
	} else {
		lut_base = (struct ath12k_reo_queue_ref *)dp->reoq_lut.vaddr;
	}

	qref = lut_base + (peer_id * (IEEE80211_NUM_TIDS + 1) + tid);

	qref->info0 = u32_encode_bits(0, BUFFER_ADDR_INFO0_ADDR);
	qref->info1 = u32_encode_bits(0, BUFFER_ADDR_INFO1_ADDR) |
		      u32_encode_bits(tid, DP_REO_QREF_NUM);
}
9452c55568SPavankumar Nandeshwar
/* Tear down an RX TID of @peer: mark the TID inactive in the DP layer
 * and process the pending REO command update-rx-queue list. A TID that
 * was never activated is ignored.
 */
void ath12k_wifi7_dp_rx_peer_tid_delete(struct ath12k_base *ab,
					struct ath12k_dp_link_peer *peer, u8 tid)
{
	struct ath12k_dp *dp = ath12k_ab_to_dp(ab);

	if (!(peer->rx_tid_active_bitmask & BIT(tid)))
		return;

	ath12k_dp_mark_tid_as_inactive(dp, peer->peer_id, tid);
	ath12k_dp_rx_process_reo_cmd_update_rx_queue_list(dp);
}
10652c55568SPavankumar Nandeshwar
ath12k_wifi7_dp_rx_link_desc_return(struct ath12k_dp * dp,struct ath12k_buffer_addr * buf_addr_info,enum hal_wbm_rel_bm_act action)10796b42732SPavankumar Nandeshwar int ath12k_wifi7_dp_rx_link_desc_return(struct ath12k_dp *dp,
10852c55568SPavankumar Nandeshwar struct ath12k_buffer_addr *buf_addr_info,
10952c55568SPavankumar Nandeshwar enum hal_wbm_rel_bm_act action)
11052c55568SPavankumar Nandeshwar {
11196b42732SPavankumar Nandeshwar struct ath12k_base *ab = dp->ab;
11252c55568SPavankumar Nandeshwar struct hal_wbm_release_ring *desc;
11352c55568SPavankumar Nandeshwar struct hal_srng *srng;
11452c55568SPavankumar Nandeshwar int ret = 0;
11552c55568SPavankumar Nandeshwar
11696b42732SPavankumar Nandeshwar srng = &dp->hal->srng_list[dp->wbm_desc_rel_ring.ring_id];
11752c55568SPavankumar Nandeshwar
11852c55568SPavankumar Nandeshwar spin_lock_bh(&srng->lock);
11952c55568SPavankumar Nandeshwar
12052c55568SPavankumar Nandeshwar ath12k_hal_srng_access_begin(ab, srng);
12152c55568SPavankumar Nandeshwar
12252c55568SPavankumar Nandeshwar desc = ath12k_hal_srng_src_get_next_entry(ab, srng);
12352c55568SPavankumar Nandeshwar if (!desc) {
12452c55568SPavankumar Nandeshwar ret = -ENOBUFS;
12552c55568SPavankumar Nandeshwar goto exit;
12652c55568SPavankumar Nandeshwar }
12752c55568SPavankumar Nandeshwar
128972f34d5SPavankumar Nandeshwar ath12k_wifi7_hal_rx_msdu_link_desc_set(ab, desc, buf_addr_info, action);
12952c55568SPavankumar Nandeshwar
13052c55568SPavankumar Nandeshwar exit:
13152c55568SPavankumar Nandeshwar ath12k_hal_srng_access_end(ab, srng);
13252c55568SPavankumar Nandeshwar
13352c55568SPavankumar Nandeshwar spin_unlock_bh(&srng->lock);
13452c55568SPavankumar Nandeshwar
13552c55568SPavankumar Nandeshwar return ret;
13652c55568SPavankumar Nandeshwar }
13752c55568SPavankumar Nandeshwar
ath12k_wifi7_dp_reo_cmd_send(struct ath12k_base * ab,struct ath12k_dp_rx_tid_rxq * rx_tid,enum hal_reo_cmd_type type,struct ath12k_hal_reo_cmd * cmd,void (* cb)(struct ath12k_dp * dp,void * ctx,enum hal_reo_cmd_status status))138972f34d5SPavankumar Nandeshwar int ath12k_wifi7_dp_reo_cmd_send(struct ath12k_base *ab,
139631ee338SJeff Johnson struct ath12k_dp_rx_tid_rxq *rx_tid,
14052c55568SPavankumar Nandeshwar enum hal_reo_cmd_type type,
14152c55568SPavankumar Nandeshwar struct ath12k_hal_reo_cmd *cmd,
14252c55568SPavankumar Nandeshwar void (*cb)(struct ath12k_dp *dp, void *ctx,
14352c55568SPavankumar Nandeshwar enum hal_reo_cmd_status status))
14452c55568SPavankumar Nandeshwar {
1453a52762bSRipan Deuri struct ath12k_dp *dp = ath12k_ab_to_dp(ab);
14652c55568SPavankumar Nandeshwar struct ath12k_dp_rx_reo_cmd *dp_cmd;
14752c55568SPavankumar Nandeshwar struct hal_srng *cmd_ring;
14852c55568SPavankumar Nandeshwar int cmd_num;
14952c55568SPavankumar Nandeshwar
15052c55568SPavankumar Nandeshwar cmd_ring = &ab->hal.srng_list[dp->reo_cmd_ring.ring_id];
151972f34d5SPavankumar Nandeshwar cmd_num = ath12k_wifi7_hal_reo_cmd_send(ab, cmd_ring, type, cmd);
15252c55568SPavankumar Nandeshwar
15352c55568SPavankumar Nandeshwar /* cmd_num should start from 1, during failure return the error code */
15452c55568SPavankumar Nandeshwar if (cmd_num < 0)
15552c55568SPavankumar Nandeshwar return cmd_num;
15652c55568SPavankumar Nandeshwar
15752c55568SPavankumar Nandeshwar /* reo cmd ring descriptors has cmd_num starting from 1 */
15852c55568SPavankumar Nandeshwar if (cmd_num == 0)
15952c55568SPavankumar Nandeshwar return -EINVAL;
16052c55568SPavankumar Nandeshwar
16152c55568SPavankumar Nandeshwar if (!cb)
16252c55568SPavankumar Nandeshwar return 0;
16352c55568SPavankumar Nandeshwar
16452c55568SPavankumar Nandeshwar /* Can this be optimized so that we keep the pending command list only
16552c55568SPavankumar Nandeshwar * for tid delete command to free up the resource on the command status
16652c55568SPavankumar Nandeshwar * indication?
16752c55568SPavankumar Nandeshwar */
168*69050f8dSKees Cook dp_cmd = kzalloc_obj(*dp_cmd, GFP_ATOMIC);
16952c55568SPavankumar Nandeshwar
17052c55568SPavankumar Nandeshwar if (!dp_cmd)
17152c55568SPavankumar Nandeshwar return -ENOMEM;
17252c55568SPavankumar Nandeshwar
17352c55568SPavankumar Nandeshwar memcpy(&dp_cmd->data, rx_tid, sizeof(*rx_tid));
17452c55568SPavankumar Nandeshwar dp_cmd->cmd_num = cmd_num;
17552c55568SPavankumar Nandeshwar dp_cmd->handler = cb;
17652c55568SPavankumar Nandeshwar
17752c55568SPavankumar Nandeshwar spin_lock_bh(&dp->reo_cmd_lock);
17852c55568SPavankumar Nandeshwar list_add_tail(&dp_cmd->list, &dp->reo_cmd_list);
17952c55568SPavankumar Nandeshwar spin_unlock_bh(&dp->reo_cmd_lock);
18052c55568SPavankumar Nandeshwar
18152c55568SPavankumar Nandeshwar return 0;
18252c55568SPavankumar Nandeshwar }
18352c55568SPavankumar Nandeshwar
/* Update the BA window size (and optionally the SSN) of an existing RX
 * TID hardware queue via a REO UPDATE_RX_QUEUE command. The software
 * copy of the window size is refreshed only after the command was
 * accepted by hardware.
 */
int ath12k_wifi7_peer_rx_tid_reo_update(struct ath12k_dp *dp,
					struct ath12k_dp_link_peer *peer,
					struct ath12k_dp_rx_tid *rx_tid,
					u32 ba_win_sz, u16 ssn,
					bool update_ssn)
{
	struct ath12k_base *ab = dp->ab;
	struct ath12k_dp_rx_tid_rxq rx_tid_rxq;
	struct ath12k_hal_reo_cmd cmd = {};
	int ret;

	ath12k_dp_init_rx_tid_rxq(&rx_tid_rxq, rx_tid,
				  (peer->rx_tid_active_bitmask & (1 << rx_tid->tid)));

	cmd.addr_lo = lower_32_bits(rx_tid_rxq.qbuf.paddr_aligned);
	cmd.addr_hi = upper_32_bits(rx_tid_rxq.qbuf.paddr_aligned);
	cmd.flag = HAL_REO_CMD_FLG_NEED_STATUS;
	cmd.upd0 = HAL_REO_CMD_UPD0_BA_WINDOW_SIZE;
	cmd.ba_window_size = ba_win_sz;

	if (update_ssn) {
		cmd.upd0 |= HAL_REO_CMD_UPD0_SSN;
		cmd.upd2 = u32_encode_bits(ssn, HAL_REO_CMD_UPD2_SSN);
	}

	ret = ath12k_wifi7_dp_reo_cmd_send(ab, &rx_tid_rxq,
					   HAL_REO_CMD_UPDATE_RX_QUEUE,
					   &cmd, NULL);
	if (ret) {
		ath12k_warn(ab, "failed to update rx tid queue, tid %d (%d)\n",
			    rx_tid_rxq.tid, ret);
		return ret;
	}

	rx_tid->ba_win_sz = ba_win_sz;

	return 0;
}
22252c55568SPavankumar Nandeshwar
ath12k_wifi7_dp_reo_cache_flush(struct ath12k_base * ab,struct ath12k_dp_rx_tid_rxq * rx_tid)223631ee338SJeff Johnson int ath12k_wifi7_dp_reo_cache_flush(struct ath12k_base *ab,
224631ee338SJeff Johnson struct ath12k_dp_rx_tid_rxq *rx_tid)
22552c55568SPavankumar Nandeshwar {
22652c55568SPavankumar Nandeshwar struct ath12k_hal_reo_cmd cmd = {};
22752c55568SPavankumar Nandeshwar int ret;
22852c55568SPavankumar Nandeshwar
22952c55568SPavankumar Nandeshwar cmd.addr_lo = lower_32_bits(rx_tid->qbuf.paddr_aligned);
23052c55568SPavankumar Nandeshwar cmd.addr_hi = upper_32_bits(rx_tid->qbuf.paddr_aligned);
231631ee338SJeff Johnson /* HAL_REO_CMD_FLG_FLUSH_FWD_ALL_MPDUS - all pending MPDUs
232631ee338SJeff Johnson *in the bitmap will be forwarded/flushed to REO output rings
233631ee338SJeff Johnson */
234631ee338SJeff Johnson cmd.flag = HAL_REO_CMD_FLG_NEED_STATUS |
235631ee338SJeff Johnson HAL_REO_CMD_FLG_FLUSH_FWD_ALL_MPDUS;
236631ee338SJeff Johnson
237631ee338SJeff Johnson /* For all QoS TIDs (except NON_QOS), the driver allocates a maximum
238631ee338SJeff Johnson * window size of 1024. In such cases, the driver can issue a single
239631ee338SJeff Johnson * 1KB descriptor flush command instead of sending multiple 128-byte
240631ee338SJeff Johnson * flush commands for each QoS TID, improving efficiency.
241631ee338SJeff Johnson */
242631ee338SJeff Johnson
243631ee338SJeff Johnson if (rx_tid->tid != HAL_DESC_REO_NON_QOS_TID)
244631ee338SJeff Johnson cmd.flag |= HAL_REO_CMD_FLG_FLUSH_QUEUE_1K_DESC;
245631ee338SJeff Johnson
246972f34d5SPavankumar Nandeshwar ret = ath12k_wifi7_dp_reo_cmd_send(ab, rx_tid,
24752c55568SPavankumar Nandeshwar HAL_REO_CMD_FLUSH_CACHE,
24852c55568SPavankumar Nandeshwar &cmd, ath12k_dp_reo_cmd_free);
249631ee338SJeff Johnson return ret;
25052c55568SPavankumar Nandeshwar }
25152c55568SPavankumar Nandeshwar
/* Ensure an RX reorder queue descriptor buffer exists for @rx_tid's TID
 * of @dp_peer and attach it to @rx_tid->qbuf.
 *
 * The descriptor is allocated and DMA-mapped once per peer/TID and
 * cached in dp_peer->reoq_bufs[]; later calls reuse the cached buffer.
 * buf->vaddr keeps the unaligned allocation (the pointer to kfree),
 * while buf->paddr_aligned is the DMA address of the
 * HAL_LINK_DESC_ALIGN-aligned descriptor handed to hardware.
 *
 * Returns 0 on success or a negative errno; on DMA mapping failure the
 * freshly allocated buffer is freed and the cache stays empty.
 */
int ath12k_wifi7_dp_rx_assign_reoq(struct ath12k_base *ab, struct ath12k_dp_peer *dp_peer,
				   struct ath12k_dp_rx_tid *rx_tid,
				   u16 ssn, enum hal_pn_type pn_type)
{
	u32 ba_win_sz = rx_tid->ba_win_sz;
	u8 tid = rx_tid->tid;
	struct ath12k_reoq_buf *buf = &dp_peer->reoq_bufs[tid];
	void *vaddr, *vaddr_aligned;
	dma_addr_t paddr_aligned;
	u32 hw_desc_sz;
	int ret;

	if (!buf->vaddr) {
		/* TODO: Optimize the memory allocation for qos tid based on
		 * the actual BA window size in REO tid update path.
		 */
		if (tid == HAL_DESC_REO_NON_QOS_TID)
			hw_desc_sz = ath12k_wifi7_hal_reo_qdesc_size(ba_win_sz, tid);
		else
			hw_desc_sz = ath12k_wifi7_hal_reo_qdesc_size(DP_BA_WIN_SZ_MAX,
								     tid);

		/* Over-allocate so the descriptor can be aligned manually */
		vaddr = kzalloc(hw_desc_sz + HAL_LINK_DESC_ALIGN - 1, GFP_ATOMIC);
		if (!vaddr)
			return -ENOMEM;

		vaddr_aligned = PTR_ALIGN(vaddr, HAL_LINK_DESC_ALIGN);

		ath12k_wifi7_hal_reo_qdesc_setup(vaddr_aligned, tid, ba_win_sz,
						 ssn, pn_type);

		paddr_aligned = dma_map_single(ab->dev, vaddr_aligned, hw_desc_sz,
					       DMA_BIDIRECTIONAL);
		ret = dma_mapping_error(ab->dev, paddr_aligned);
		if (ret) {
			kfree(vaddr);
			return ret;
		}

		buf->vaddr = vaddr;
		buf->paddr_aligned = paddr_aligned;
		buf->size = hw_desc_sz;
	}

	rx_tid->qbuf = *buf;

	return 0;
}
3016c7ceff2SPavankumar Nandeshwar
ath12k_wifi7_dp_rx_tid_delete_handler(struct ath12k_base * ab,struct ath12k_dp_rx_tid_rxq * rx_tid)302631ee338SJeff Johnson int ath12k_wifi7_dp_rx_tid_delete_handler(struct ath12k_base *ab,
303631ee338SJeff Johnson struct ath12k_dp_rx_tid_rxq *rx_tid)
304631ee338SJeff Johnson {
305631ee338SJeff Johnson struct ath12k_hal_reo_cmd cmd = {};
306631ee338SJeff Johnson
307631ee338SJeff Johnson cmd.flag = HAL_REO_CMD_FLG_NEED_STATUS;
308631ee338SJeff Johnson cmd.addr_lo = lower_32_bits(rx_tid->qbuf.paddr_aligned);
309631ee338SJeff Johnson cmd.addr_hi = upper_32_bits(rx_tid->qbuf.paddr_aligned);
310631ee338SJeff Johnson cmd.upd0 |= HAL_REO_CMD_UPD0_VLD;
311631ee338SJeff Johnson /* Observed flush cache failure, to avoid that set vld bit during delete */
312631ee338SJeff Johnson cmd.upd1 |= HAL_REO_CMD_UPD1_VLD;
313631ee338SJeff Johnson
314631ee338SJeff Johnson return ath12k_wifi7_dp_reo_cmd_send(ab, rx_tid,
315631ee338SJeff Johnson HAL_REO_CMD_UPDATE_RX_QUEUE, &cmd,
316631ee338SJeff Johnson ath12k_dp_rx_tid_del_func);
317631ee338SJeff Johnson }
318631ee338SJeff Johnson
ath12k_wifi7_dp_rx_h_csum_offload(struct sk_buff * msdu,struct hal_rx_desc_data * rx_info)319972f34d5SPavankumar Nandeshwar static void ath12k_wifi7_dp_rx_h_csum_offload(struct sk_buff *msdu,
320e8a1e49cSPavankumar Nandeshwar struct hal_rx_desc_data *rx_info)
321a7cfbb18SPavankumar Nandeshwar {
322a7cfbb18SPavankumar Nandeshwar msdu->ip_summed = (rx_info->ip_csum_fail || rx_info->l4_csum_fail) ?
323a7cfbb18SPavankumar Nandeshwar CHECKSUM_NONE : CHECKSUM_UNNECESSARY;
324a7cfbb18SPavankumar Nandeshwar }
325a7cfbb18SPavankumar Nandeshwar
/* Post-process a received MPDU's MSDU: look up the DP peer to determine
 * the encryption type, translate hardware error/decryption state into
 * mac80211 RX status flags, apply checksum offload results and undecap
 * the frame. Must be called with the RCU read lock held (enforced by
 * the RCU_LOCKDEP_WARN below) for the peer lookup.
 */
static void ath12k_wifi7_dp_rx_h_mpdu(struct ath12k_pdev_dp *dp_pdev,
				      struct sk_buff *msdu,
				      struct hal_rx_desc *rx_desc,
				      struct hal_rx_desc_data *rx_info)
{
	struct ath12k_skb_rxcb *rxcb;
	enum hal_encrypt_type enctype;
	bool is_decrypted = false;
	struct ieee80211_hdr *hdr;
	struct ath12k_dp_peer *peer;
	struct ieee80211_rx_status *rx_status = rx_info->rx_status;
	u32 err_bitmap = rx_info->err_bitmap;

	RCU_LOCKDEP_WARN(!rcu_read_lock_held(),
			 "dp_rx_h_mpdu called without rcu lock");

	/* PN for multicast packets will be checked in mac80211 */
	rxcb = ATH12K_SKB_RXCB(msdu);
	rxcb->is_mcbc = rx_info->is_mcbc;

	if (rxcb->is_mcbc)
		rxcb->peer_id = rx_info->peer_id;

	peer = ath12k_dp_peer_find_by_peerid(dp_pdev, rxcb->peer_id);
	if (peer) {
		/* resetting mcbc bit because mcbc packets are unicast
		 * packets only for AP as STA sends unicast packets.
		 */
		rxcb->is_mcbc = rxcb->is_mcbc && !peer->ucast_ra_only;

		/* Group vs pairwise key selection follows the (possibly
		 * corrected) mcbc state.
		 */
		if (rxcb->is_mcbc)
			enctype = peer->sec_type_grp;
		else
			enctype = peer->sec_type;
	} else {
		/* No peer entry found: treat the frame as unencrypted */
		enctype = HAL_ENCRYPT_TYPE_OPEN;
	}

	/* Trust the HW decryption status only for encrypted, error-free
	 * frames.
	 */
	if (enctype != HAL_ENCRYPT_TYPE_OPEN && !err_bitmap)
		is_decrypted = rx_info->is_decrypted;

	/* Clear per-MPDU flags while leaving per-PPDU flags intact */
	rx_status->flag &= ~(RX_FLAG_FAILED_FCS_CRC |
			     RX_FLAG_MMIC_ERROR |
			     RX_FLAG_DECRYPTED |
			     RX_FLAG_IV_STRIPPED |
			     RX_FLAG_MMIC_STRIPPED);

	if (err_bitmap & HAL_RX_MPDU_ERR_FCS)
		rx_status->flag |= RX_FLAG_FAILED_FCS_CRC;
	if (err_bitmap & HAL_RX_MPDU_ERR_TKIP_MIC)
		rx_status->flag |= RX_FLAG_MMIC_ERROR;

	if (is_decrypted) {
		rx_status->flag |= RX_FLAG_DECRYPTED | RX_FLAG_MMIC_STRIPPED;

		/* For mcbc the MIC/ICV were stripped by HW; for unicast the
		 * IV was stripped and the PN already validated.
		 */
		if (rx_info->is_mcbc)
			rx_status->flag |= RX_FLAG_MIC_STRIPPED |
					   RX_FLAG_ICV_STRIPPED;
		else
			rx_status->flag |= RX_FLAG_IV_STRIPPED |
					   RX_FLAG_PN_VALIDATED;
	}

	ath12k_wifi7_dp_rx_h_csum_offload(msdu, rx_info);
	ath12k_dp_rx_h_undecap(dp_pdev, msdu, rx_desc,
			       enctype, is_decrypted, rx_info);

	if (!is_decrypted || rx_info->is_mcbc)
		return;

	/* Frame still carries an 802.11 header (non-ethernet decap):
	 * clear the protected bit since the crypto material was stripped.
	 */
	if (rx_info->decap_type != DP_RX_DECAP_TYPE_ETHERNET2_DIX) {
		hdr = (void *)msdu->data;
		hdr->frame_control &= ~__cpu_to_le16(IEEE80211_FCTL_PROTECTED);
	}
}
402a7cfbb18SPavankumar Nandeshwar
/* Coalesce an MSDU that was scattered across multiple RX buffers into a
 * single skb (@first), consuming the continuation buffers from
 * @msdu_list. @last points at the buffer carrying the valid MSDU_END
 * TLVs. @l3pad_bytes and @msdu_len come from the RX descriptor.
 *
 * On success @first holds the complete MSDU payload (descriptor and
 * padding stripped) and all consumed buffers are freed. Returns 0 on
 * success, -ENOMEM if the head could not be expanded (in which case
 * the remaining buffers of this MSDU are drained and freed), or
 * -EINVAL on a malformed buffer length.
 */
static int ath12k_wifi7_dp_rx_msdu_coalesce(struct ath12k_hal *hal,
					    struct sk_buff_head *msdu_list,
					    struct sk_buff *first, struct sk_buff *last,
					    u8 l3pad_bytes, int msdu_len,
					    struct hal_rx_desc_data *rx_info)
{
	struct sk_buff *skb;
	struct ath12k_skb_rxcb *rxcb = ATH12K_SKB_RXCB(first);
	int buf_first_hdr_len, buf_first_len;
	struct hal_rx_desc *ldesc;
	int space_extra, rem_len, buf_len;
	u32 hal_rx_desc_sz = hal->hal_desc_sz;
	bool is_continuation;

	/* As the msdu is spread across multiple rx buffers,
	 * find the offset to the start of msdu for computing
	 * the length of the msdu in the first buffer.
	 */
	buf_first_hdr_len = hal_rx_desc_sz + l3pad_bytes;
	buf_first_len = DP_RX_BUFFER_SIZE - buf_first_hdr_len;

	/* Degenerate case: the MSDU actually fits in the first buffer */
	if (WARN_ON_ONCE(msdu_len <= buf_first_len)) {
		skb_put(first, buf_first_hdr_len + msdu_len);
		skb_pull(first, buf_first_hdr_len);
		return 0;
	}

	ldesc = (struct hal_rx_desc *)last->data;
	rxcb->is_first_msdu = rx_info->is_first_msdu;
	rxcb->is_last_msdu = rx_info->is_last_msdu;

	/* MSDU spans over multiple buffers because the length of the MSDU
	 * exceeds DP_RX_BUFFER_SIZE - HAL_RX_DESC_SIZE. So assume the data
	 * in the first buf is of length DP_RX_BUFFER_SIZE - HAL_RX_DESC_SIZE.
	 */
	skb_put(first, DP_RX_BUFFER_SIZE);
	skb_pull(first, buf_first_hdr_len);

	/* When an MSDU spread over multiple buffers MSDU_END
	 * tlvs are valid only in the last buffer. Copy those tlvs.
	 */
	ath12k_dp_rx_desc_end_tlv_copy(hal, rxcb->rx_desc, ldesc);

	space_extra = msdu_len - (buf_first_len + skb_tailroom(first));
	if (space_extra > 0 &&
	    (pskb_expand_head(first, 0, space_extra, GFP_ATOMIC) < 0)) {
		/* Free up all buffers of the MSDU */
		while ((skb = __skb_dequeue(msdu_list)) != NULL) {
			rxcb = ATH12K_SKB_RXCB(skb);
			/* A buffer without the continuation bit is the
			 * final fragment of this MSDU — stop after it.
			 */
			if (!rxcb->is_continuation) {
				dev_kfree_skb_any(skb);
				break;
			}
			dev_kfree_skb_any(skb);
		}
		return -ENOMEM;
	}

	/* Append the payload of each continuation buffer to @first */
	rem_len = msdu_len - buf_first_len;
	while ((skb = __skb_dequeue(msdu_list)) != NULL && rem_len > 0) {
		rxcb = ATH12K_SKB_RXCB(skb);
		is_continuation = rxcb->is_continuation;
		if (is_continuation)
			buf_len = DP_RX_BUFFER_SIZE - hal_rx_desc_sz;
		else
			buf_len = rem_len;

		/* Sanity: a single buffer can never carry more payload
		 * than its size minus the RX descriptor.
		 */
		if (buf_len > (DP_RX_BUFFER_SIZE - hal_rx_desc_sz)) {
			WARN_ON_ONCE(1);
			dev_kfree_skb_any(skb);
			return -EINVAL;
		}

		skb_put(skb, buf_len + hal_rx_desc_sz);
		skb_pull(skb, hal_rx_desc_sz);
		skb_copy_from_linear_data(skb, skb_put(first, buf_len),
					  buf_len);
		dev_kfree_skb_any(skb);

		rem_len -= buf_len;
		if (!is_continuation)
			break;
	}

	return 0;
}
489a7cfbb18SPavankumar Nandeshwar
/* Validate a single MSDU reaped from the REO destination ring and strip
 * the HW descriptor so the buffer holds only the 802.11 frame.
 *
 * The MSDU_END TLV needed for validation lives in the last buffer of the
 * MSDU, so that buffer is looked up from @msdu_list first. Depending on
 * how the MSDU arrived, the payload is then made contiguous:
 *  - fragment: only the HAL rx descriptor is stripped
 *  - single buffer: descriptor plus L3 pad are stripped after a length
 *    sanity check against the rx buffer size
 *  - multi buffer (continuation): the scattered buffers are coalesced
 *
 * On success, PPDU/MPDU derived rx status is filled into @rx_info and 0
 * is returned. On failure a negative errno is returned and the caller is
 * responsible for freeing @msdu.
 */
static int ath12k_wifi7_dp_rx_process_msdu(struct ath12k_pdev_dp *dp_pdev,
					   struct sk_buff *msdu,
					   struct sk_buff_head *msdu_list,
					   struct hal_rx_desc_data *rx_info)
{
	struct ath12k_dp *dp = dp_pdev->dp;
	struct hal_rx_desc *rx_desc, *lrx_desc;
	struct ath12k_skb_rxcb *rxcb;
	struct sk_buff *last_buf;
	struct ath12k_hal *hal = dp->hal;
	u8 l3_pad_bytes;
	u16 msdu_len;
	int ret;
	u32 hal_rx_desc_sz = hal->hal_desc_sz;

	/* MSDU_END TLV is only valid in the last buffer of the MSDU */
	last_buf = ath12k_dp_rx_get_msdu_last_buf(msdu_list, msdu);
	if (!last_buf) {
		ath12k_warn(dp->ab,
			    "No valid Rx buffer to access MSDU_END tlv\n");
		ret = -EIO;
		goto free_out;
	}

	rx_desc = (struct hal_rx_desc *)msdu->data;
	lrx_desc = (struct hal_rx_desc *)last_buf->data;

	ath12k_dp_extract_rx_desc_data(hal, rx_info, rx_desc, lrx_desc);
	/* msdu_done signals that HW has finished writing the descriptor */
	if (!rx_info->msdu_done) {
		ath12k_warn(dp->ab, "msdu_done bit in msdu_end is not set\n");
		ret = -EIO;
		goto free_out;
	}

	rxcb = ATH12K_SKB_RXCB(msdu);
	rxcb->rx_desc = rx_desc;
	msdu_len = rx_info->msdu_len;
	l3_pad_bytes = rx_info->l3_pad_bytes;

	if (rxcb->is_frag) {
		/* Fragment: strip only the HAL rx descriptor */
		skb_pull(msdu, hal_rx_desc_sz);
	} else if (!rxcb->is_continuation) {
		/* MSDU fits in one buffer: frame must fit the rx buffer */
		if ((msdu_len + hal_rx_desc_sz) > DP_RX_BUFFER_SIZE) {
			ret = -EINVAL;
			ath12k_warn(dp->ab, "invalid msdu len %u\n", msdu_len);
			ath12k_dbg_dump(dp->ab, ATH12K_DBG_DATA, NULL, "", rx_desc,
					sizeof(*rx_desc));
			goto free_out;
		}
		skb_put(msdu, hal_rx_desc_sz + l3_pad_bytes + msdu_len);
		skb_pull(msdu, hal_rx_desc_sz + l3_pad_bytes);
	} else {
		/* MSDU spans multiple rx buffers: merge them into @msdu */
		ret = ath12k_wifi7_dp_rx_msdu_coalesce(hal, msdu_list,
						       msdu, last_buf,
						       l3_pad_bytes, msdu_len,
						       rx_info);
		if (ret) {
			ath12k_warn(dp->ab,
				    "failed to coalesce msdu rx buffer%d\n", ret);
			goto free_out;
		}
	}

	if (unlikely(!ath12k_dp_rx_check_nwifi_hdr_len_valid(dp, rx_desc, msdu,
							     rx_info))) {
		ret = -EINVAL;
		goto free_out;
	}

	ath12k_dp_rx_h_ppdu(dp_pdev, rx_info);
	ath12k_wifi7_dp_rx_h_mpdu(dp_pdev, msdu, rx_desc, rx_info);

	/* Tell mac80211 duplicate detection is already done and that the
	 * monitor path should not see this frame again.
	 */
	rx_info->rx_status->flag |= RX_FLAG_SKIP_MONITOR | RX_FLAG_DUP_VALIDATED;

	return 0;

free_out:
	return ret;
}
568a7cfbb18SPavankumar Nandeshwar
/* Deliver all MSDUs collected from one REO destination ring to mac80211.
 *
 * Each MSDU is mapped back to its originating device/pdev via the hw
 * link id recorded in its rxcb. MSDUs are dropped when the owning pdev
 * is inactive, the radio is performing CAC, the pdev DP context is
 * missing, or per-MSDU processing fails.
 */
static void
ath12k_wifi7_dp_rx_process_received_packets(struct ath12k_dp *dp,
					    struct napi_struct *napi,
					    struct sk_buff_head *msdu_list,
					    int ring_id)
{
	struct ath12k_hw_group *ag = dp->ag;
	struct ath12k_dp_hw_group *dp_hw_grp = &ag->dp_hw_grp;
	struct ieee80211_rx_status rx_status = {};
	struct ath12k_skb_rxcb *rxcb;
	struct sk_buff *msdu;
	struct ath12k *ar;
	struct ath12k_pdev_dp *dp_pdev;
	struct ath12k_hw_link *hw_links = ag->hw_links;
	struct ath12k_base *partner_ab;
	struct hal_rx_desc_data rx_info;
	struct ath12k_dp *partner_dp;
	u8 hw_link_id, pdev_idx;
	int ret;

	if (skb_queue_empty(msdu_list))
		return;

	rx_info.addr2_present = false;
	rx_info.rx_status = &rx_status;

	/* RCU read section guards the pdevs_active dereference below */
	rcu_read_lock();

	while ((msdu = __skb_dequeue(msdu_list))) {
		rxcb = ATH12K_SKB_RXCB(msdu);
		hw_link_id = rxcb->hw_link_id;
		/* Resolve the partner device/pdev that owns this hw link */
		partner_dp = ath12k_dp_hw_grp_to_dp(dp_hw_grp,
						    hw_links[hw_link_id].device_id);
		pdev_idx = ath12k_hw_mac_id_to_pdev_id(partner_dp->hw_params,
						       hw_links[hw_link_id].pdev_idx);
		partner_ab = partner_dp->ab;
		ar = partner_ab->pdevs[pdev_idx].ar;
		if (!rcu_dereference(partner_ab->pdevs_active[pdev_idx])) {
			dev_kfree_skb_any(msdu);
			continue;
		}

		/* No rx delivery while the channel availability check runs */
		if (test_bit(ATH12K_FLAG_CAC_RUNNING, &ar->dev_flags)) {
			dev_kfree_skb_any(msdu);
			continue;
		}

		dp_pdev = ath12k_dp_to_pdev_dp(partner_dp, pdev_idx);
		if (!dp_pdev) {
			dev_kfree_skb_any(msdu);
			continue;
		}

		ret = ath12k_wifi7_dp_rx_process_msdu(dp_pdev, msdu, msdu_list, &rx_info);
		if (ret) {
			ath12k_dbg(dp->ab, ATH12K_DBG_DATA,
				   "Unable to process msdu %d", ret);
			dev_kfree_skb_any(msdu);
			continue;
		}

		ath12k_dp_rx_deliver_msdu(dp_pdev, napi, msdu, &rx_info);
	}

	rcu_read_unlock();
}
635a7cfbb18SPavankumar Nandeshwar
/* NAPI poll handler for one REO destination ring.
 *
 * Reaps up to @budget completed MSDUs from the ring, unmaps their DMA
 * buffers and queues them on a local list, then replenishes the rx
 * refill ring of every device that contributed buffers and finally
 * hands the list to ath12k_wifi7_dp_rx_process_received_packets() for
 * delivery to mac80211.
 *
 * Returns the number of complete MSDUs reaped (continuation buffers of
 * a multi-buffer MSDU are not counted individually).
 */
int ath12k_wifi7_dp_rx_process(struct ath12k_dp *dp, int ring_id,
			       struct napi_struct *napi, int budget)
{
	struct ath12k_hw_group *ag = dp->ag;
	struct ath12k_base *ab = dp->ab;
	struct ath12k_hal *hal = dp->hal;
	struct ath12k_dp_hw_group *dp_hw_grp = &ag->dp_hw_grp;
	struct list_head rx_desc_used_list[ATH12K_MAX_DEVICES];
	struct ath12k_hw_link *hw_links = ag->hw_links;
	int num_buffs_reaped[ATH12K_MAX_DEVICES] = {};
	struct ath12k_rx_desc_info *desc_info;
	struct dp_rxdma_ring *rx_ring = &dp->rx_refill_buf_ring;
	struct hal_reo_dest_ring *desc;
	struct ath12k_dp *partner_dp;
	struct sk_buff_head msdu_list;
	struct ath12k_skb_rxcb *rxcb;
	int total_msdu_reaped = 0;
	u8 hw_link_id, device_id;
	struct hal_srng *srng;
	struct sk_buff *msdu;
	bool done = false;
	u64 desc_va;

	__skb_queue_head_init(&msdu_list);

	/* Used descriptors are tracked per device so each device's refill
	 * ring can be replenished with its own descriptors afterwards.
	 */
	for (device_id = 0; device_id < ATH12K_MAX_DEVICES; device_id++)
		INIT_LIST_HEAD(&rx_desc_used_list[device_id]);

	srng = &hal->srng_list[dp->reo_dst_ring[ring_id].ring_id];

	spin_lock_bh(&srng->lock);

try_again:
	ath12k_hal_srng_access_begin(ab, srng);

	while ((desc = ath12k_hal_srng_dst_get_next_entry(ab, srng))) {
		struct rx_mpdu_desc *mpdu_info;
		struct rx_msdu_desc *msdu_info;
		enum hal_reo_dest_ring_push_reason push_reason;
		u32 cookie;

		cookie = le32_get_bits(desc->buf_addr_info.info1,
				       BUFFER_ADDR_INFO1_SW_COOKIE);

		hw_link_id = le32_get_bits(desc->info0,
					   HAL_REO_DEST_RING_INFO0_SRC_LINK_ID);

		/* HW cookie conversion: the ring entry carries the virtual
		 * address of the SW rx descriptor directly.
		 */
		desc_va = ((u64)le32_to_cpu(desc->buf_va_hi) << 32 |
			   le32_to_cpu(desc->buf_va_lo));
		desc_info = (struct ath12k_rx_desc_info *)((unsigned long)desc_va);

		device_id = hw_links[hw_link_id].device_id;
		partner_dp = ath12k_dp_hw_grp_to_dp(dp_hw_grp, device_id);
		if (unlikely(!partner_dp)) {
			/* NOTE(review): desc_info is dereferenced here before
			 * the !desc_info fallback below — confirm desc_va can
			 * never be 0 when the partner device lookup fails.
			 */
			if (desc_info->skb) {
				dev_kfree_skb_any(desc_info->skb);
				desc_info->skb = NULL;
			}

			continue;
		}

		/* retry manual desc retrieval */
		if (!desc_info) {
			desc_info = ath12k_dp_get_rx_desc(partner_dp, cookie);
			if (!desc_info) {
				ath12k_warn(partner_dp->ab, "Invalid cookie in manual descriptor retrieval: 0x%x\n",
					    cookie);
				continue;
			}
		}

		if (desc_info->magic != ATH12K_DP_RX_DESC_MAGIC)
			ath12k_warn(ab, "Check HW CC implementation");

		msdu = desc_info->skb;
		desc_info->skb = NULL;

		list_add_tail(&desc_info->list, &rx_desc_used_list[device_id]);

		rxcb = ATH12K_SKB_RXCB(msdu);
		dma_unmap_single(partner_dp->dev, rxcb->paddr,
				 msdu->len + skb_tailroom(msdu),
				 DMA_FROM_DEVICE);

		num_buffs_reaped[device_id]++;
		dp->device_stats.reo_rx[ring_id][dp->device_id]++;

		/* Only frames pushed by a routing instruction are deliverable;
		 * anything else (e.g. error push) is dropped and counted.
		 */
		push_reason = le32_get_bits(desc->info0,
					    HAL_REO_DEST_RING_INFO0_PUSH_REASON);
		if (push_reason !=
		    HAL_REO_DEST_RING_PUSH_REASON_ROUTING_INSTRUCTION) {
			dev_kfree_skb_any(msdu);
			dp->device_stats.hal_reo_error[ring_id]++;
			continue;
		}

		msdu_info = &desc->rx_msdu_info;
		mpdu_info = &desc->rx_mpdu_info;

		/* Cache per-buffer placement flags for later reassembly */
		rxcb->is_first_msdu = !!(le32_to_cpu(msdu_info->info0) &
					 RX_MSDU_DESC_INFO0_FIRST_MSDU_IN_MPDU);
		rxcb->is_last_msdu = !!(le32_to_cpu(msdu_info->info0) &
					RX_MSDU_DESC_INFO0_LAST_MSDU_IN_MPDU);
		rxcb->is_continuation = !!(le32_to_cpu(msdu_info->info0) &
					   RX_MSDU_DESC_INFO0_MSDU_CONTINUATION);
		rxcb->hw_link_id = hw_link_id;
		rxcb->peer_id = ath12k_wifi7_dp_rx_get_peer_id(dp, dp->peer_metadata_ver,
							       mpdu_info->peer_meta_data);
		rxcb->tid = le32_get_bits(mpdu_info->info0,
					  RX_MPDU_DESC_INFO0_TID);

		__skb_queue_tail(&msdu_list, msdu);

		/* A continuation buffer means the MSDU is incomplete; only
		 * count (and allow stopping at) whole MSDUs.
		 */
		if (!rxcb->is_continuation) {
			total_msdu_reaped++;
			done = true;
		} else {
			done = false;
		}

		if (total_msdu_reaped >= budget)
			break;
	}

	/* Hw might have updated the head pointer after we cached it.
	 * In this case, even though there are entries in the ring we'll
	 * get rx_desc NULL. Give the read another try with updated cached
	 * head pointer so that we can reap complete MPDU in the current
	 * rx processing.
	 */
	if (!done && ath12k_hal_srng_dst_num_free(ab, srng, true)) {
		ath12k_hal_srng_access_end(ab, srng);
		goto try_again;
	}

	ath12k_hal_srng_access_end(ab, srng);

	spin_unlock_bh(&srng->lock);

	if (!total_msdu_reaped)
		goto exit;

	/* Give each contributing device back as many rx buffers as were
	 * consumed from its refill ring.
	 */
	for (device_id = 0; device_id < ATH12K_MAX_DEVICES; device_id++) {
		if (!num_buffs_reaped[device_id])
			continue;

		partner_dp = ath12k_dp_hw_grp_to_dp(dp_hw_grp, device_id);
		rx_ring = &partner_dp->rx_refill_buf_ring;

		ath12k_dp_rx_bufs_replenish(partner_dp, rx_ring,
					    &rx_desc_used_list[device_id],
					    num_buffs_reaped[device_id]);
	}

	ath12k_wifi7_dp_rx_process_received_packets(dp, napi, &msdu_list,
						    ring_id);

exit:
	return total_msdu_reaped;
}
797a7cfbb18SPavankumar Nandeshwar
7986b4954d3SPavankumar Nandeshwar static bool
ath12k_wifi7_dp_rx_h_defrag_validate_incr_pn(struct ath12k_pdev_dp * dp_pdev,struct ath12k_dp_rx_tid * rx_tid,enum hal_encrypt_type encrypt_type)7999e0ee04fSRipan Deuri ath12k_wifi7_dp_rx_h_defrag_validate_incr_pn(struct ath12k_pdev_dp *dp_pdev,
800e8a1e49cSPavankumar Nandeshwar struct ath12k_dp_rx_tid *rx_tid,
801e8a1e49cSPavankumar Nandeshwar enum hal_encrypt_type encrypt_type)
8026b4954d3SPavankumar Nandeshwar {
8039e0ee04fSRipan Deuri struct ath12k_dp *dp = dp_pdev->dp;
8046b4954d3SPavankumar Nandeshwar struct sk_buff *first_frag, *skb;
8056b4954d3SPavankumar Nandeshwar u64 last_pn;
8066b4954d3SPavankumar Nandeshwar u64 cur_pn;
8076b4954d3SPavankumar Nandeshwar
8086b4954d3SPavankumar Nandeshwar first_frag = skb_peek(&rx_tid->rx_frags);
8096b4954d3SPavankumar Nandeshwar
8106b4954d3SPavankumar Nandeshwar if (encrypt_type != HAL_ENCRYPT_TYPE_CCMP_128 &&
8116b4954d3SPavankumar Nandeshwar encrypt_type != HAL_ENCRYPT_TYPE_CCMP_256 &&
8126b4954d3SPavankumar Nandeshwar encrypt_type != HAL_ENCRYPT_TYPE_GCMP_128 &&
8136b4954d3SPavankumar Nandeshwar encrypt_type != HAL_ENCRYPT_TYPE_AES_GCMP_256)
8146b4954d3SPavankumar Nandeshwar return true;
8156b4954d3SPavankumar Nandeshwar
8169e0ee04fSRipan Deuri last_pn = ath12k_dp_rx_h_get_pn(dp, first_frag);
8176b4954d3SPavankumar Nandeshwar skb_queue_walk(&rx_tid->rx_frags, skb) {
8186b4954d3SPavankumar Nandeshwar if (skb == first_frag)
8196b4954d3SPavankumar Nandeshwar continue;
8206b4954d3SPavankumar Nandeshwar
8219e0ee04fSRipan Deuri cur_pn = ath12k_dp_rx_h_get_pn(dp, skb);
8226b4954d3SPavankumar Nandeshwar if (cur_pn != last_pn + 1)
8236b4954d3SPavankumar Nandeshwar return false;
8246b4954d3SPavankumar Nandeshwar last_pn = cur_pn;
8256b4954d3SPavankumar Nandeshwar }
8266b4954d3SPavankumar Nandeshwar return true;
8276b4954d3SPavankumar Nandeshwar }
8286b4954d3SPavankumar Nandeshwar
/* Re-inject a fully reassembled (defragmented) frame back into hardware
 * via the REO reinject (entrance) ring so that REO processes it like a
 * normal, unfragmented MPDU.
 *
 * The link descriptor saved in @rx_tid->dst_ring_desc is reused: its
 * first MSDU entry is rewritten to point at @defrag_skb, and a matching
 * REO entrance descriptor is built carrying the MPDU metadata (raw MPDU,
 * PN valid, sequence number and REO destination indication).
 *
 * Returns 0 on success or a negative errno. On failure the DMA mapping
 * and the borrowed SW rx descriptor are released via the goto cleanup
 * chain.
 */
static int ath12k_wifi7_dp_rx_h_defrag_reo_reinject(struct ath12k_dp *dp,
						    struct ath12k_dp_rx_tid *rx_tid,
						    struct sk_buff *defrag_skb)
{
	struct ath12k_base *ab = dp->ab;
	struct ath12k_hal *hal = dp->hal;
	struct hal_rx_desc *rx_desc = (struct hal_rx_desc *)defrag_skb->data;
	struct hal_reo_entrance_ring *reo_ent_ring;
	struct hal_reo_dest_ring *reo_dest_ring;
	struct dp_link_desc_bank *link_desc_banks;
	struct hal_rx_msdu_link *msdu_link;
	struct hal_rx_msdu_details *msdu0;
	struct hal_srng *srng;
	dma_addr_t link_paddr, buf_paddr;
	u32 desc_bank, msdu_info, msdu_ext_info, mpdu_info;
	u32 cookie, hal_rx_desc_sz, dest_ring_info0, queue_addr_hi;
	int ret;
	struct ath12k_rx_desc_info *desc_info;
	enum hal_rx_buf_return_buf_manager idle_link_rbm = dp->idle_link_rbm;
	u8 dst_ind;

	hal_rx_desc_sz = hal->hal_desc_sz;
	link_desc_banks = dp->link_desc_banks;
	reo_dest_ring = rx_tid->dst_ring_desc;

	/* Locate the original MSDU link descriptor from the saved REO
	 * destination ring entry.
	 */
	ath12k_wifi7_hal_rx_reo_ent_paddr_get(&reo_dest_ring->buf_addr_info,
					      &link_paddr, &cookie);
	desc_bank = u32_get_bits(cookie, DP_LINK_DESC_BANK_MASK);

	msdu_link = (struct hal_rx_msdu_link *)(link_desc_banks[desc_bank].vaddr +
			(link_paddr - link_desc_banks[desc_bank].paddr));
	msdu0 = &msdu_link->msdu_link[0];
	msdu_ext_info = le32_to_cpu(msdu0->rx_msdu_ext_info.info0);
	/* Preserve the REO destination indication before wiping msdu0 */
	dst_ind = u32_get_bits(msdu_ext_info, RX_MSDU_EXT_DESC_INFO0_REO_DEST_IND);

	memset(msdu0, 0, sizeof(*msdu0));

	/* Rebuild the first MSDU entry as a single, complete MSDU covering
	 * the whole reassembled frame (minus the HAL rx descriptor).
	 */
	msdu_info = u32_encode_bits(1, RX_MSDU_DESC_INFO0_FIRST_MSDU_IN_MPDU) |
		    u32_encode_bits(1, RX_MSDU_DESC_INFO0_LAST_MSDU_IN_MPDU) |
		    u32_encode_bits(0, RX_MSDU_DESC_INFO0_MSDU_CONTINUATION) |
		    u32_encode_bits(defrag_skb->len - hal_rx_desc_sz,
				    RX_MSDU_DESC_INFO0_MSDU_LENGTH) |
		    u32_encode_bits(1, RX_MSDU_DESC_INFO0_VALID_SA) |
		    u32_encode_bits(1, RX_MSDU_DESC_INFO0_VALID_DA);
	msdu0->rx_msdu_info.info0 = cpu_to_le32(msdu_info);
	msdu0->rx_msdu_ext_info.info0 = cpu_to_le32(msdu_ext_info);

	/* change msdu len in hal rx desc */
	ath12k_dp_rxdesc_set_msdu_len(hal, rx_desc, defrag_skb->len - hal_rx_desc_sz);

	buf_paddr = dma_map_single(dp->dev, defrag_skb->data,
				   defrag_skb->len + skb_tailroom(defrag_skb),
				   DMA_TO_DEVICE);
	if (dma_mapping_error(dp->dev, buf_paddr))
		return -ENOMEM;

	/* Borrow a free SW rx descriptor to own @defrag_skb while it is
	 * back in hardware hands.
	 */
	spin_lock_bh(&dp->rx_desc_lock);
	desc_info = list_first_entry_or_null(&dp->rx_desc_free_list,
					     struct ath12k_rx_desc_info,
					     list);
	if (!desc_info) {
		spin_unlock_bh(&dp->rx_desc_lock);
		ath12k_warn(ab, "failed to find rx desc for reinject\n");
		ret = -ENOMEM;
		goto err_unmap_dma;
	}

	desc_info->skb = defrag_skb;
	desc_info->in_use = true;

	list_del(&desc_info->list);
	spin_unlock_bh(&dp->rx_desc_lock);

	ATH12K_SKB_RXCB(defrag_skb)->paddr = buf_paddr;

	ath12k_wifi7_hal_rx_buf_addr_info_set(&msdu0->buf_addr_info, buf_paddr,
					      desc_info->cookie,
					      HAL_RX_BUF_RBM_SW3_BM);

	/* Fill mpdu details into reo entrance ring */
	srng = &hal->srng_list[dp->reo_reinject_ring.ring_id];

	spin_lock_bh(&srng->lock);
	ath12k_hal_srng_access_begin(ab, srng);

	reo_ent_ring = ath12k_hal_srng_src_get_next_entry(ab, srng);
	if (!reo_ent_ring) {
		ath12k_hal_srng_access_end(ab, srng);
		spin_unlock_bh(&srng->lock);
		ret = -ENOSPC;
		goto err_free_desc;
	}
	memset(reo_ent_ring, 0, sizeof(*reo_ent_ring));

	ath12k_wifi7_hal_rx_buf_addr_info_set(&reo_ent_ring->buf_addr_info, link_paddr,
					      cookie, idle_link_rbm);

	/* One-MSDU, raw MPDU with a valid PN, keeping the original TID */
	mpdu_info = u32_encode_bits(1, RX_MPDU_DESC_INFO0_MSDU_COUNT) |
		    u32_encode_bits(0, RX_MPDU_DESC_INFO0_FRAG_FLAG) |
		    u32_encode_bits(1, RX_MPDU_DESC_INFO0_RAW_MPDU) |
		    u32_encode_bits(1, RX_MPDU_DESC_INFO0_VALID_PN) |
		    u32_encode_bits(rx_tid->tid, RX_MPDU_DESC_INFO0_TID);

	reo_ent_ring->rx_mpdu_info.info0 = cpu_to_le32(mpdu_info);
	reo_ent_ring->rx_mpdu_info.peer_meta_data =
		reo_dest_ring->rx_mpdu_info.peer_meta_data;

	/* With a REOQ LUT the queue is addressed via peer metadata;
	 * otherwise the REO queue buffer's physical address is used.
	 */
	if (dp->hw_params->reoq_lut_support) {
		reo_ent_ring->queue_addr_lo = reo_dest_ring->rx_mpdu_info.peer_meta_data;
		queue_addr_hi = 0;
	} else {
		reo_ent_ring->queue_addr_lo =
			cpu_to_le32(lower_32_bits(rx_tid->qbuf.paddr_aligned));
		queue_addr_hi = upper_32_bits(rx_tid->qbuf.paddr_aligned);
	}

	reo_ent_ring->info0 = le32_encode_bits(queue_addr_hi,
					       HAL_REO_ENTR_RING_INFO0_QUEUE_ADDR_HI) |
			      le32_encode_bits(dst_ind,
					       HAL_REO_ENTR_RING_INFO0_DEST_IND);

	reo_ent_ring->info1 = le32_encode_bits(rx_tid->cur_sn,
					       HAL_REO_ENTR_RING_INFO1_MPDU_SEQ_NUM);
	dest_ring_info0 = le32_get_bits(reo_dest_ring->info0,
					HAL_REO_DEST_RING_INFO0_SRC_LINK_ID);
	reo_ent_ring->info2 =
		cpu_to_le32(u32_get_bits(dest_ring_info0,
					 HAL_REO_ENTR_RING_INFO2_SRC_LINK_ID));

	ath12k_hal_srng_access_end(ab, srng);
	spin_unlock_bh(&srng->lock);

	return 0;

err_free_desc:
	/* Return the borrowed rx descriptor to the free list */
	spin_lock_bh(&dp->rx_desc_lock);
	desc_info->in_use = false;
	desc_info->skb = NULL;
	list_add_tail(&desc_info->list, &dp->rx_desc_free_list);
	spin_unlock_bh(&dp->rx_desc_lock);
err_unmap_dma:
	dma_unmap_single(dp->dev, buf_paddr, defrag_skb->len + skb_tailroom(defrag_skb),
			 DMA_TO_DEVICE);
	return ret;
}
9746b4954d3SPavankumar Nandeshwar
/* Verify the Michael MIC of a TKIP protected frame.
 *
 * For any cipher other than TKIP this is a no-op returning 0. On MIC
 * failure the frame is decapped and delivered to mac80211 with
 * RX_FLAG_MMIC_ERROR set (so TKIP countermeasures can be triggered) and
 * -EINVAL is returned to stop further processing by the caller.
 */
static int ath12k_wifi7_dp_rx_h_verify_tkip_mic(struct ath12k_pdev_dp *dp_pdev,
						struct ath12k_dp_peer *peer,
						enum hal_encrypt_type enctype,
						struct sk_buff *msdu,
						struct hal_rx_desc_data *rx_info)
{
	struct ath12k_dp *dp = dp_pdev->dp;
	struct ath12k_hal *hal = dp->hal;
	struct hal_rx_desc *rx_desc = (struct hal_rx_desc *)msdu->data;
	struct ieee80211_rx_status *rxs = IEEE80211_SKB_RXCB(msdu);
	struct ieee80211_key_conf *key_conf;
	struct ieee80211_hdr *hdr;
	u8 mic[IEEE80211_CCMP_MIC_LEN];
	int head_len, tail_len, ret;
	size_t data_len;
	u32 hdr_len, hal_rx_desc_sz = hal->hal_desc_sz;
	u8 *key, *data;
	u8 key_idx;

	if (enctype != HAL_ENCRYPT_TYPE_TKIP_MIC)
		return 0;

	rx_info->addr2_present = false;
	rx_info->rx_status = rxs;

	/* The MIC covers the MSDU payload between the 802.11 header + IV
	 * and the trailing MIC/ICV/FCS bytes.
	 */
	hdr = (struct ieee80211_hdr *)(msdu->data + hal_rx_desc_sz);
	hdr_len = ieee80211_hdrlen(hdr->frame_control);
	head_len = hdr_len + hal_rx_desc_sz + IEEE80211_TKIP_IV_LEN;
	tail_len = IEEE80211_CCMP_MIC_LEN + IEEE80211_TKIP_ICV_LEN + FCS_LEN;

	if (!is_multicast_ether_addr(hdr->addr1))
		key_idx = peer->ucast_keyidx;
	else
		key_idx = peer->mcast_keyidx;

	/* NOTE(review): assumes a key is installed at key_idx — confirm
	 * callers only reach here for frames of peers with TKIP keys set.
	 */
	key_conf = peer->keys[key_idx];

	data = msdu->data + head_len;
	data_len = msdu->len - head_len - tail_len;
	/* RX MIC key half of the TKIP key material */
	key = &key_conf->key[NL80211_TKIP_DATA_OFFSET_RX_MIC_KEY];

	ret = ath12k_dp_rx_h_michael_mic(peer->tfm_mmic, key, hdr, data,
					 data_len, mic);
	if (ret || memcmp(mic, data + data_len, IEEE80211_CCMP_MIC_LEN))
		goto mic_fail;

	return 0;

mic_fail:
	(ATH12K_SKB_RXCB(msdu))->is_first_msdu = true;
	(ATH12K_SKB_RXCB(msdu))->is_last_msdu = true;

	ath12k_dp_extract_rx_desc_data(hal, rx_info, rx_desc, rx_desc);

	/* Flag the MIC failure and hand the frame to mac80211 anyway so
	 * it can run TKIP countermeasures.
	 */
	rxs->flag |= RX_FLAG_MMIC_ERROR | RX_FLAG_MMIC_STRIPPED |
		     RX_FLAG_IV_STRIPPED | RX_FLAG_DECRYPTED;
	skb_pull(msdu, hal_rx_desc_sz);

	if (unlikely(!ath12k_dp_rx_check_nwifi_hdr_len_valid(dp, rx_desc, msdu,
							     rx_info)))
		return -EINVAL;

	ath12k_dp_rx_h_ppdu(dp_pdev, rx_info);
	ath12k_dp_rx_h_undecap(dp_pdev, msdu, rx_desc,
			       HAL_ENCRYPT_TYPE_TKIP_MIC, true, rx_info);
	ieee80211_rx(ath12k_pdev_dp_to_hw(dp_pdev), msdu);
	return -EINVAL;
}
1043a7cfbb18SPavankumar Nandeshwar
ath12k_wifi7_dp_rx_h_defrag(struct ath12k_pdev_dp * dp_pdev,struct ath12k_dp_peer * peer,struct ath12k_dp_rx_tid * rx_tid,struct sk_buff ** defrag_skb,enum hal_encrypt_type enctype,struct hal_rx_desc_data * rx_info)10449e0ee04fSRipan Deuri static int ath12k_wifi7_dp_rx_h_defrag(struct ath12k_pdev_dp *dp_pdev,
104511157e09SHarsh Kumar Bijlani struct ath12k_dp_peer *peer,
10466b4954d3SPavankumar Nandeshwar struct ath12k_dp_rx_tid *rx_tid,
1047e8a1e49cSPavankumar Nandeshwar struct sk_buff **defrag_skb,
1048e8a1e49cSPavankumar Nandeshwar enum hal_encrypt_type enctype,
1049e8a1e49cSPavankumar Nandeshwar struct hal_rx_desc_data *rx_info)
10506b4954d3SPavankumar Nandeshwar {
10519e0ee04fSRipan Deuri struct ath12k_dp *dp = dp_pdev->dp;
10529e0ee04fSRipan Deuri struct ath12k_base *ab = dp->ab;
10536b4954d3SPavankumar Nandeshwar struct sk_buff *skb, *first_frag, *last_frag;
10546b4954d3SPavankumar Nandeshwar struct ieee80211_hdr *hdr;
10556b4954d3SPavankumar Nandeshwar bool is_decrypted = false;
10566b4954d3SPavankumar Nandeshwar int msdu_len = 0;
10576b4954d3SPavankumar Nandeshwar int extra_space;
10589e0ee04fSRipan Deuri u32 flags, hal_rx_desc_sz = ab->hal.hal_desc_sz;
10596b4954d3SPavankumar Nandeshwar
10606b4954d3SPavankumar Nandeshwar first_frag = skb_peek(&rx_tid->rx_frags);
10616b4954d3SPavankumar Nandeshwar last_frag = skb_peek_tail(&rx_tid->rx_frags);
10626b4954d3SPavankumar Nandeshwar
10636b4954d3SPavankumar Nandeshwar skb_queue_walk(&rx_tid->rx_frags, skb) {
10646b4954d3SPavankumar Nandeshwar flags = 0;
10656b4954d3SPavankumar Nandeshwar hdr = (struct ieee80211_hdr *)(skb->data + hal_rx_desc_sz);
10666b4954d3SPavankumar Nandeshwar
10676b4954d3SPavankumar Nandeshwar if (enctype != HAL_ENCRYPT_TYPE_OPEN)
1068e8a1e49cSPavankumar Nandeshwar is_decrypted = rx_info->is_decrypted;
10696b4954d3SPavankumar Nandeshwar
10706b4954d3SPavankumar Nandeshwar if (is_decrypted) {
10716b4954d3SPavankumar Nandeshwar if (skb != first_frag)
10726b4954d3SPavankumar Nandeshwar flags |= RX_FLAG_IV_STRIPPED;
10736b4954d3SPavankumar Nandeshwar if (skb != last_frag)
10746b4954d3SPavankumar Nandeshwar flags |= RX_FLAG_ICV_STRIPPED |
10756b4954d3SPavankumar Nandeshwar RX_FLAG_MIC_STRIPPED;
10766b4954d3SPavankumar Nandeshwar }
10776b4954d3SPavankumar Nandeshwar
10786b4954d3SPavankumar Nandeshwar /* RX fragments are always raw packets */
10796b4954d3SPavankumar Nandeshwar if (skb != last_frag)
10806b4954d3SPavankumar Nandeshwar skb_trim(skb, skb->len - FCS_LEN);
10819e0ee04fSRipan Deuri ath12k_dp_rx_h_undecap_frag(dp_pdev, skb, enctype, flags);
10826b4954d3SPavankumar Nandeshwar
10836b4954d3SPavankumar Nandeshwar if (skb != first_frag)
10846b4954d3SPavankumar Nandeshwar skb_pull(skb, hal_rx_desc_sz +
10856b4954d3SPavankumar Nandeshwar ieee80211_hdrlen(hdr->frame_control));
10866b4954d3SPavankumar Nandeshwar msdu_len += skb->len;
10876b4954d3SPavankumar Nandeshwar }
10886b4954d3SPavankumar Nandeshwar
10896b4954d3SPavankumar Nandeshwar extra_space = msdu_len - (DP_RX_BUFFER_SIZE + skb_tailroom(first_frag));
10906b4954d3SPavankumar Nandeshwar if (extra_space > 0 &&
10916b4954d3SPavankumar Nandeshwar (pskb_expand_head(first_frag, 0, extra_space, GFP_ATOMIC) < 0))
10926b4954d3SPavankumar Nandeshwar return -ENOMEM;
10936b4954d3SPavankumar Nandeshwar
10946b4954d3SPavankumar Nandeshwar __skb_unlink(first_frag, &rx_tid->rx_frags);
10956b4954d3SPavankumar Nandeshwar while ((skb = __skb_dequeue(&rx_tid->rx_frags))) {
10966b4954d3SPavankumar Nandeshwar skb_put_data(first_frag, skb->data, skb->len);
10976b4954d3SPavankumar Nandeshwar dev_kfree_skb_any(skb);
10986b4954d3SPavankumar Nandeshwar }
10996b4954d3SPavankumar Nandeshwar
11006b4954d3SPavankumar Nandeshwar hdr = (struct ieee80211_hdr *)(first_frag->data + hal_rx_desc_sz);
11016b4954d3SPavankumar Nandeshwar hdr->frame_control &= ~__cpu_to_le16(IEEE80211_FCTL_MOREFRAGS);
11026b4954d3SPavankumar Nandeshwar ATH12K_SKB_RXCB(first_frag)->is_frag = 1;
11036b4954d3SPavankumar Nandeshwar
11049e0ee04fSRipan Deuri if (ath12k_wifi7_dp_rx_h_verify_tkip_mic(dp_pdev, peer, enctype, first_frag,
11059e0ee04fSRipan Deuri rx_info))
11066b4954d3SPavankumar Nandeshwar first_frag = NULL;
11076b4954d3SPavankumar Nandeshwar
11086b4954d3SPavankumar Nandeshwar *defrag_skb = first_frag;
11096b4954d3SPavankumar Nandeshwar return 0;
11106b4954d3SPavankumar Nandeshwar }
11116b4954d3SPavankumar Nandeshwar
ath12k_wifi7_dp_rx_frags_cleanup(struct ath12k_dp_rx_tid * rx_tid,bool rel_link_desc)1112d43133a3SPavankumar Nandeshwar void ath12k_wifi7_dp_rx_frags_cleanup(struct ath12k_dp_rx_tid *rx_tid,
1113d43133a3SPavankumar Nandeshwar bool rel_link_desc)
1114d43133a3SPavankumar Nandeshwar {
1115d43133a3SPavankumar Nandeshwar enum hal_wbm_rel_bm_act act = HAL_WBM_REL_BM_ACT_PUT_IN_IDLE;
1116d43133a3SPavankumar Nandeshwar struct ath12k_buffer_addr *buf_addr_info;
1117d43133a3SPavankumar Nandeshwar struct ath12k_dp *dp = rx_tid->dp;
1118d43133a3SPavankumar Nandeshwar
1119d43133a3SPavankumar Nandeshwar lockdep_assert_held(&dp->dp_lock);
1120d43133a3SPavankumar Nandeshwar
1121d43133a3SPavankumar Nandeshwar if (rx_tid->dst_ring_desc) {
1122d43133a3SPavankumar Nandeshwar if (rel_link_desc) {
1123d43133a3SPavankumar Nandeshwar buf_addr_info = &rx_tid->dst_ring_desc->buf_addr_info;
112496b42732SPavankumar Nandeshwar ath12k_wifi7_dp_rx_link_desc_return(dp, buf_addr_info, act);
1125d43133a3SPavankumar Nandeshwar }
1126d43133a3SPavankumar Nandeshwar kfree(rx_tid->dst_ring_desc);
1127d43133a3SPavankumar Nandeshwar rx_tid->dst_ring_desc = NULL;
1128d43133a3SPavankumar Nandeshwar }
1129d43133a3SPavankumar Nandeshwar
1130d43133a3SPavankumar Nandeshwar rx_tid->cur_sn = 0;
1131d43133a3SPavankumar Nandeshwar rx_tid->last_frag_no = 0;
1132d43133a3SPavankumar Nandeshwar rx_tid->rx_frag_bitmap = 0;
1133d43133a3SPavankumar Nandeshwar __skb_queue_purge(&rx_tid->rx_frags);
1134d43133a3SPavankumar Nandeshwar }
1135d43133a3SPavankumar Nandeshwar
ath12k_wifi7_dp_rx_frag_h_mpdu(struct ath12k_pdev_dp * dp_pdev,struct sk_buff * msdu,struct hal_reo_dest_ring * ring_desc,struct hal_rx_desc_data * rx_info)11369e0ee04fSRipan Deuri static int ath12k_wifi7_dp_rx_frag_h_mpdu(struct ath12k_pdev_dp *dp_pdev,
11376b4954d3SPavankumar Nandeshwar struct sk_buff *msdu,
1138e8a1e49cSPavankumar Nandeshwar struct hal_reo_dest_ring *ring_desc,
1139e8a1e49cSPavankumar Nandeshwar struct hal_rx_desc_data *rx_info)
11406b4954d3SPavankumar Nandeshwar {
11419e0ee04fSRipan Deuri struct ath12k_dp *dp = dp_pdev->dp;
114296b42732SPavankumar Nandeshwar struct ath12k_hal *hal = dp->hal;
11439e0ee04fSRipan Deuri struct ath12k_base *ab = dp->ab;
114411157e09SHarsh Kumar Bijlani struct ath12k_dp_peer *peer;
11456b4954d3SPavankumar Nandeshwar struct ath12k_dp_rx_tid *rx_tid;
11466b4954d3SPavankumar Nandeshwar struct sk_buff *defrag_skb = NULL;
1147e8a1e49cSPavankumar Nandeshwar u32 peer_id = rx_info->peer_id;
11486b4954d3SPavankumar Nandeshwar u16 seqno, frag_no;
1149e8a1e49cSPavankumar Nandeshwar u8 tid = rx_info->tid;
11506b4954d3SPavankumar Nandeshwar int ret = 0;
11516b4954d3SPavankumar Nandeshwar bool more_frags;
1152e8a1e49cSPavankumar Nandeshwar enum hal_encrypt_type enctype = rx_info->enctype;
11536b4954d3SPavankumar Nandeshwar
115496b42732SPavankumar Nandeshwar frag_no = ath12k_dp_rx_h_frag_no(hal, msdu);
115596b42732SPavankumar Nandeshwar more_frags = ath12k_dp_rx_h_more_frags(hal, msdu);
1156e8a1e49cSPavankumar Nandeshwar seqno = rx_info->seq_no;
11576b4954d3SPavankumar Nandeshwar
1158e8a1e49cSPavankumar Nandeshwar if (!rx_info->seq_ctl_valid || !rx_info->fc_valid ||
11596b4954d3SPavankumar Nandeshwar tid > IEEE80211_NUM_TIDS)
11606b4954d3SPavankumar Nandeshwar return -EINVAL;
11616b4954d3SPavankumar Nandeshwar
11626b4954d3SPavankumar Nandeshwar /* received unfragmented packet in reo
11636b4954d3SPavankumar Nandeshwar * exception ring, this shouldn't happen
11646b4954d3SPavankumar Nandeshwar * as these packets typically come from
11656b4954d3SPavankumar Nandeshwar * reo2sw srngs.
11666b4954d3SPavankumar Nandeshwar */
11676b4954d3SPavankumar Nandeshwar if (WARN_ON_ONCE(!frag_no && !more_frags))
11686b4954d3SPavankumar Nandeshwar return -EINVAL;
11696b4954d3SPavankumar Nandeshwar
11700cafe8ccSHarsh Kumar Bijlani spin_lock_bh(&dp->dp_lock);
117111157e09SHarsh Kumar Bijlani peer = ath12k_dp_peer_find_by_peerid(dp_pdev, peer_id);
11726b4954d3SPavankumar Nandeshwar if (!peer) {
11736b4954d3SPavankumar Nandeshwar ath12k_warn(ab, "failed to find the peer to de-fragment received fragment peer_id %d\n",
11746b4954d3SPavankumar Nandeshwar peer_id);
11756b4954d3SPavankumar Nandeshwar ret = -ENOENT;
11766b4954d3SPavankumar Nandeshwar goto out_unlock;
11776b4954d3SPavankumar Nandeshwar }
11786b4954d3SPavankumar Nandeshwar
11796b4954d3SPavankumar Nandeshwar if (!peer->dp_setup_done) {
11806b4954d3SPavankumar Nandeshwar ath12k_warn(ab, "The peer %pM [%d] has uninitialized datapath\n",
11816b4954d3SPavankumar Nandeshwar peer->addr, peer_id);
11826b4954d3SPavankumar Nandeshwar ret = -ENOENT;
11836b4954d3SPavankumar Nandeshwar goto out_unlock;
11846b4954d3SPavankumar Nandeshwar }
11856b4954d3SPavankumar Nandeshwar
11866b4954d3SPavankumar Nandeshwar rx_tid = &peer->rx_tid[tid];
11876b4954d3SPavankumar Nandeshwar
11886b4954d3SPavankumar Nandeshwar if ((!skb_queue_empty(&rx_tid->rx_frags) && seqno != rx_tid->cur_sn) ||
11896b4954d3SPavankumar Nandeshwar skb_queue_empty(&rx_tid->rx_frags)) {
11906b4954d3SPavankumar Nandeshwar /* Flush stored fragments and start a new sequence */
1191d43133a3SPavankumar Nandeshwar ath12k_wifi7_dp_rx_frags_cleanup(rx_tid, true);
11926b4954d3SPavankumar Nandeshwar rx_tid->cur_sn = seqno;
11936b4954d3SPavankumar Nandeshwar }
11946b4954d3SPavankumar Nandeshwar
11956b4954d3SPavankumar Nandeshwar if (rx_tid->rx_frag_bitmap & BIT(frag_no)) {
11966b4954d3SPavankumar Nandeshwar /* Fragment already present */
11976b4954d3SPavankumar Nandeshwar ret = -EINVAL;
11986b4954d3SPavankumar Nandeshwar goto out_unlock;
11996b4954d3SPavankumar Nandeshwar }
12006b4954d3SPavankumar Nandeshwar
12016b4954d3SPavankumar Nandeshwar if ((!rx_tid->rx_frag_bitmap || frag_no > __fls(rx_tid->rx_frag_bitmap)))
12026b4954d3SPavankumar Nandeshwar __skb_queue_tail(&rx_tid->rx_frags, msdu);
12036b4954d3SPavankumar Nandeshwar else
120496b42732SPavankumar Nandeshwar ath12k_dp_rx_h_sort_frags(hal, &rx_tid->rx_frags, msdu);
12056b4954d3SPavankumar Nandeshwar
12066b4954d3SPavankumar Nandeshwar rx_tid->rx_frag_bitmap |= BIT(frag_no);
12076b4954d3SPavankumar Nandeshwar if (!more_frags)
12086b4954d3SPavankumar Nandeshwar rx_tid->last_frag_no = frag_no;
12096b4954d3SPavankumar Nandeshwar
12106b4954d3SPavankumar Nandeshwar if (frag_no == 0) {
12116b4954d3SPavankumar Nandeshwar rx_tid->dst_ring_desc = kmemdup(ring_desc,
12126b4954d3SPavankumar Nandeshwar sizeof(*rx_tid->dst_ring_desc),
12136b4954d3SPavankumar Nandeshwar GFP_ATOMIC);
12146b4954d3SPavankumar Nandeshwar if (!rx_tid->dst_ring_desc) {
12156b4954d3SPavankumar Nandeshwar ret = -ENOMEM;
12166b4954d3SPavankumar Nandeshwar goto out_unlock;
12176b4954d3SPavankumar Nandeshwar }
12186b4954d3SPavankumar Nandeshwar } else {
121996b42732SPavankumar Nandeshwar ath12k_wifi7_dp_rx_link_desc_return(dp, &ring_desc->buf_addr_info,
12206b4954d3SPavankumar Nandeshwar HAL_WBM_REL_BM_ACT_PUT_IN_IDLE);
12216b4954d3SPavankumar Nandeshwar }
12226b4954d3SPavankumar Nandeshwar
12236b4954d3SPavankumar Nandeshwar if (!rx_tid->last_frag_no ||
12246b4954d3SPavankumar Nandeshwar rx_tid->rx_frag_bitmap != GENMASK(rx_tid->last_frag_no, 0)) {
12256b4954d3SPavankumar Nandeshwar mod_timer(&rx_tid->frag_timer, jiffies +
12266b4954d3SPavankumar Nandeshwar ATH12K_DP_RX_FRAGMENT_TIMEOUT_MS);
12276b4954d3SPavankumar Nandeshwar goto out_unlock;
12286b4954d3SPavankumar Nandeshwar }
12296b4954d3SPavankumar Nandeshwar
12300cafe8ccSHarsh Kumar Bijlani spin_unlock_bh(&dp->dp_lock);
12316b4954d3SPavankumar Nandeshwar timer_delete_sync(&rx_tid->frag_timer);
12320cafe8ccSHarsh Kumar Bijlani spin_lock_bh(&dp->dp_lock);
12336b4954d3SPavankumar Nandeshwar
123411157e09SHarsh Kumar Bijlani peer = ath12k_dp_peer_find_by_peerid(dp_pdev, peer_id);
12356b4954d3SPavankumar Nandeshwar if (!peer)
12366b4954d3SPavankumar Nandeshwar goto err_frags_cleanup;
12376b4954d3SPavankumar Nandeshwar
12389e0ee04fSRipan Deuri if (!ath12k_wifi7_dp_rx_h_defrag_validate_incr_pn(dp_pdev, rx_tid, enctype))
12396b4954d3SPavankumar Nandeshwar goto err_frags_cleanup;
12406b4954d3SPavankumar Nandeshwar
12419e0ee04fSRipan Deuri if (ath12k_wifi7_dp_rx_h_defrag(dp_pdev, peer, rx_tid, &defrag_skb,
1242e8a1e49cSPavankumar Nandeshwar enctype, rx_info))
12436b4954d3SPavankumar Nandeshwar goto err_frags_cleanup;
12446b4954d3SPavankumar Nandeshwar
12456b4954d3SPavankumar Nandeshwar if (!defrag_skb)
12466b4954d3SPavankumar Nandeshwar goto err_frags_cleanup;
12476b4954d3SPavankumar Nandeshwar
12489e0ee04fSRipan Deuri if (ath12k_wifi7_dp_rx_h_defrag_reo_reinject(dp, rx_tid, defrag_skb))
12496b4954d3SPavankumar Nandeshwar goto err_frags_cleanup;
12506b4954d3SPavankumar Nandeshwar
1251d43133a3SPavankumar Nandeshwar ath12k_wifi7_dp_rx_frags_cleanup(rx_tid, false);
12526b4954d3SPavankumar Nandeshwar goto out_unlock;
12536b4954d3SPavankumar Nandeshwar
12546b4954d3SPavankumar Nandeshwar err_frags_cleanup:
12556b4954d3SPavankumar Nandeshwar dev_kfree_skb_any(defrag_skb);
1256d43133a3SPavankumar Nandeshwar ath12k_wifi7_dp_rx_frags_cleanup(rx_tid, true);
12576b4954d3SPavankumar Nandeshwar out_unlock:
12580cafe8ccSHarsh Kumar Bijlani spin_unlock_bh(&dp->dp_lock);
12596b4954d3SPavankumar Nandeshwar return ret;
12606b4954d3SPavankumar Nandeshwar }
12616b4954d3SPavankumar Nandeshwar
/* Recover and process one MSDU buffer referenced by an error ring entry.
 *
 * Resolves the SW rx descriptor from the virtual address embedded in
 * @desc (falling back to a cookie lookup), unmaps the buffer, and either
 * drops it (@drop) or hands it to the fragment handler.  The consumed
 * descriptor is moved onto @used_list for later replenish.
 *
 * Returns 0 when a buffer was reaped (even if subsequently dropped),
 * -EINVAL when the descriptor could not be resolved.
 */
static int
ath12k_wifi7_dp_process_rx_err_buf(struct ath12k_pdev_dp *dp_pdev,
				   struct hal_reo_dest_ring *desc,
				   struct list_head *used_list,
				   bool drop, u32 cookie)
{
	struct ath12k *ar = ath12k_pdev_dp_to_ar(dp_pdev);
	struct ath12k_dp *dp = dp_pdev->dp;
	struct ath12k_hal *hal = dp->hal;
	struct sk_buff *msdu;
	struct ath12k_skb_rxcb *rxcb;
	struct hal_rx_desc_data rx_info;
	struct hal_rx_desc *rx_desc;
	u16 msdu_len;
	u32 hal_rx_desc_sz = hal->hal_desc_sz;
	struct ath12k_rx_desc_info *desc_info;
	u64 desc_va;

	/* HW cookie conversion: descriptor pointer is carried in the ring
	 * entry as a split 64-bit virtual address.
	 */
	desc_va = ((u64)le32_to_cpu(desc->buf_va_hi) << 32 |
		   le32_to_cpu(desc->buf_va_lo));
	desc_info = (struct ath12k_rx_desc_info *)((unsigned long)desc_va);

	/* retry manual desc retrieval */
	if (!desc_info) {
		desc_info = ath12k_dp_get_rx_desc(dp, cookie);
		if (!desc_info) {
			ath12k_warn(dp->ab,
				    "Invalid cookie in DP rx error descriptor retrieval: 0x%x\n",
				    cookie);
			return -EINVAL;
		}
	}

	if (desc_info->magic != ATH12K_DP_RX_DESC_MAGIC)
		ath12k_warn(dp->ab, "RX Exception, Check HW CC implementation");

	/* Take ownership of the skb and recycle the SW descriptor */
	msdu = desc_info->skb;
	desc_info->skb = NULL;

	list_add_tail(&desc_info->list, used_list);

	rxcb = ATH12K_SKB_RXCB(msdu);
	dma_unmap_single(dp->dev, rxcb->paddr,
			 msdu->len + skb_tailroom(msdu),
			 DMA_FROM_DEVICE);

	if (drop) {
		dev_kfree_skb_any(msdu);
		return 0;
	}

	/* Drop if the owning pdev went down or is in CAC */
	rcu_read_lock();
	if (!rcu_dereference(ar->ab->pdevs_active[ar->pdev_idx])) {
		dev_kfree_skb_any(msdu);
		goto exit;
	}

	if (test_bit(ATH12K_FLAG_CAC_RUNNING, &ar->dev_flags)) {
		dev_kfree_skb_any(msdu);
		goto exit;
	}

	rx_desc = (struct hal_rx_desc *)msdu->data;
	ath12k_dp_extract_rx_desc_data(hal, &rx_info, rx_desc, rx_desc);

	/* Sanity-check the HW-reported length against the buffer size */
	msdu_len = rx_info.msdu_len;
	if ((msdu_len + hal_rx_desc_sz) > DP_RX_BUFFER_SIZE) {
		ath12k_warn(dp->ab, "invalid msdu leng %u", msdu_len);
		ath12k_dbg_dump(dp->ab, ATH12K_DBG_DATA, NULL, "", rx_desc,
				sizeof(*rx_desc));
		dev_kfree_skb_any(msdu);
		goto exit;
	}

	skb_put(msdu, hal_rx_desc_sz + msdu_len);

	/* Fragment handling failed: free the frame and give the link
	 * descriptor back to the idle list.
	 */
	if (ath12k_wifi7_dp_rx_frag_h_mpdu(dp_pdev, msdu, desc, &rx_info)) {
		dev_kfree_skb_any(msdu);
		ath12k_wifi7_dp_rx_link_desc_return(dp, &desc->buf_addr_info,
						    HAL_WBM_REL_BM_ACT_PUT_IN_IDLE);
	}
exit:
	rcu_read_unlock();
	return 0;
}
13476b4954d3SPavankumar Nandeshwar
ath12k_dp_h_msdu_buffer_type(struct ath12k_dp * dp,struct list_head * list,struct hal_reo_dest_ring * desc)1348631ee338SJeff Johnson static int ath12k_dp_h_msdu_buffer_type(struct ath12k_dp *dp,
1349631ee338SJeff Johnson struct list_head *list,
1350631ee338SJeff Johnson struct hal_reo_dest_ring *desc)
1351631ee338SJeff Johnson {
1352631ee338SJeff Johnson struct ath12k_rx_desc_info *desc_info;
1353631ee338SJeff Johnson struct ath12k_skb_rxcb *rxcb;
1354631ee338SJeff Johnson struct sk_buff *msdu;
1355631ee338SJeff Johnson u64 desc_va;
1356631ee338SJeff Johnson
1357631ee338SJeff Johnson dp->device_stats.reo_excep_msdu_buf_type++;
1358631ee338SJeff Johnson
1359631ee338SJeff Johnson desc_va = (u64)le32_to_cpu(desc->buf_va_hi) << 32 |
1360631ee338SJeff Johnson le32_to_cpu(desc->buf_va_lo);
1361631ee338SJeff Johnson desc_info = (struct ath12k_rx_desc_info *)(uintptr_t)desc_va;
1362631ee338SJeff Johnson if (!desc_info) {
1363631ee338SJeff Johnson u32 cookie;
1364631ee338SJeff Johnson
1365631ee338SJeff Johnson cookie = le32_get_bits(desc->buf_addr_info.info1,
1366631ee338SJeff Johnson BUFFER_ADDR_INFO1_SW_COOKIE);
1367631ee338SJeff Johnson desc_info = ath12k_dp_get_rx_desc(dp, cookie);
1368631ee338SJeff Johnson if (!desc_info) {
1369631ee338SJeff Johnson ath12k_warn(dp->ab, "Invalid cookie in manual descriptor retrieval: 0x%x\n",
1370631ee338SJeff Johnson cookie);
1371631ee338SJeff Johnson return -EINVAL;
1372631ee338SJeff Johnson }
1373631ee338SJeff Johnson }
1374631ee338SJeff Johnson
1375631ee338SJeff Johnson if (desc_info->magic != ATH12K_DP_RX_DESC_MAGIC) {
1376631ee338SJeff Johnson ath12k_warn(dp->ab, "rx exception, magic check failed with value: %u\n",
1377631ee338SJeff Johnson desc_info->magic);
1378631ee338SJeff Johnson return -EINVAL;
1379631ee338SJeff Johnson }
1380631ee338SJeff Johnson
1381631ee338SJeff Johnson msdu = desc_info->skb;
1382631ee338SJeff Johnson desc_info->skb = NULL;
1383631ee338SJeff Johnson list_add_tail(&desc_info->list, list);
1384631ee338SJeff Johnson rxcb = ATH12K_SKB_RXCB(msdu);
1385631ee338SJeff Johnson dma_unmap_single(dp->dev, rxcb->paddr, msdu->len + skb_tailroom(msdu),
1386631ee338SJeff Johnson DMA_FROM_DEVICE);
1387631ee338SJeff Johnson dev_kfree_skb_any(msdu);
1388631ee338SJeff Johnson
1389631ee338SJeff Johnson return 0;
1390631ee338SJeff Johnson }
1391631ee338SJeff Johnson
ath12k_wifi7_dp_rx_process_err(struct ath12k_dp * dp,struct napi_struct * napi,int budget)139296b42732SPavankumar Nandeshwar int ath12k_wifi7_dp_rx_process_err(struct ath12k_dp *dp, struct napi_struct *napi,
13936b4954d3SPavankumar Nandeshwar int budget)
13946b4954d3SPavankumar Nandeshwar {
139596b42732SPavankumar Nandeshwar struct ath12k_base *ab = dp->ab;
139696b42732SPavankumar Nandeshwar struct ath12k_hal *hal = dp->hal;
13978042e30aSRipan Deuri struct ath12k_hw_group *ag = dp->ag;
13988042e30aSRipan Deuri struct ath12k_dp_hw_group *dp_hw_grp = &ag->dp_hw_grp;
13993a52762bSRipan Deuri struct ath12k_dp *partner_dp;
14006b4954d3SPavankumar Nandeshwar struct list_head rx_desc_used_list[ATH12K_MAX_DEVICES];
14016b4954d3SPavankumar Nandeshwar u32 msdu_cookies[HAL_NUM_RX_MSDUS_PER_LINK_DESC];
14026b4954d3SPavankumar Nandeshwar int num_buffs_reaped[ATH12K_MAX_DEVICES] = {};
14036b4954d3SPavankumar Nandeshwar struct dp_link_desc_bank *link_desc_banks;
14046b4954d3SPavankumar Nandeshwar enum hal_rx_buf_return_buf_manager rbm;
14056b4954d3SPavankumar Nandeshwar struct hal_rx_msdu_link *link_desc_va;
14066b4954d3SPavankumar Nandeshwar int tot_n_bufs_reaped, quota, ret, i;
14076b4954d3SPavankumar Nandeshwar struct hal_reo_dest_ring *reo_desc;
14086b4954d3SPavankumar Nandeshwar struct dp_rxdma_ring *rx_ring;
14096b4954d3SPavankumar Nandeshwar struct dp_srng *reo_except;
14106b4954d3SPavankumar Nandeshwar struct ath12k_hw_link *hw_links = ag->hw_links;
14119e0ee04fSRipan Deuri struct ath12k_pdev_dp *dp_pdev;
14126b4954d3SPavankumar Nandeshwar u8 hw_link_id, device_id;
14136b4954d3SPavankumar Nandeshwar u32 desc_bank, num_msdus;
14146b4954d3SPavankumar Nandeshwar struct hal_srng *srng;
14156b4954d3SPavankumar Nandeshwar dma_addr_t paddr;
14166b4954d3SPavankumar Nandeshwar bool is_frag;
14176b4954d3SPavankumar Nandeshwar bool drop;
14189e0ee04fSRipan Deuri int pdev_idx;
1419972f34d5SPavankumar Nandeshwar struct list_head *used_list;
1420972f34d5SPavankumar Nandeshwar enum hal_wbm_rel_bm_act act;
14216b4954d3SPavankumar Nandeshwar
14226b4954d3SPavankumar Nandeshwar tot_n_bufs_reaped = 0;
14236b4954d3SPavankumar Nandeshwar quota = budget;
14246b4954d3SPavankumar Nandeshwar
14256b4954d3SPavankumar Nandeshwar for (device_id = 0; device_id < ATH12K_MAX_DEVICES; device_id++)
14266b4954d3SPavankumar Nandeshwar INIT_LIST_HEAD(&rx_desc_used_list[device_id]);
14276b4954d3SPavankumar Nandeshwar
14283a52762bSRipan Deuri reo_except = &dp->reo_except_ring;
14296b4954d3SPavankumar Nandeshwar
143096b42732SPavankumar Nandeshwar srng = &hal->srng_list[reo_except->ring_id];
14316b4954d3SPavankumar Nandeshwar
14326b4954d3SPavankumar Nandeshwar spin_lock_bh(&srng->lock);
14336b4954d3SPavankumar Nandeshwar
14346b4954d3SPavankumar Nandeshwar ath12k_hal_srng_access_begin(ab, srng);
14356b4954d3SPavankumar Nandeshwar
14366b4954d3SPavankumar Nandeshwar while (budget &&
14376b4954d3SPavankumar Nandeshwar (reo_desc = ath12k_hal_srng_dst_get_next_entry(ab, srng))) {
14386b4954d3SPavankumar Nandeshwar drop = false;
1439775fe5acSPavankumar Nandeshwar dp->device_stats.err_ring_pkts++;
14406b4954d3SPavankumar Nandeshwar
1441631ee338SJeff Johnson hw_link_id = le32_get_bits(reo_desc->info0,
1442631ee338SJeff Johnson HAL_REO_DEST_RING_INFO0_SRC_LINK_ID);
1443631ee338SJeff Johnson device_id = hw_links[hw_link_id].device_id;
1444631ee338SJeff Johnson partner_dp = ath12k_dp_hw_grp_to_dp(dp_hw_grp, device_id);
1445631ee338SJeff Johnson
1446631ee338SJeff Johnson /* Below case is added to handle data packet from un-associated clients.
1447631ee338SJeff Johnson * As it is expected that AST lookup will fail for
1448631ee338SJeff Johnson * un-associated station's data packets.
1449631ee338SJeff Johnson */
1450631ee338SJeff Johnson if (le32_get_bits(reo_desc->info0, HAL_REO_DEST_RING_INFO0_BUFFER_TYPE) ==
1451631ee338SJeff Johnson HAL_REO_DEST_RING_BUFFER_TYPE_MSDU) {
1452631ee338SJeff Johnson if (!ath12k_dp_h_msdu_buffer_type(partner_dp,
1453631ee338SJeff Johnson &rx_desc_used_list[device_id],
1454631ee338SJeff Johnson reo_desc)) {
1455631ee338SJeff Johnson num_buffs_reaped[device_id]++;
1456631ee338SJeff Johnson tot_n_bufs_reaped++;
1457631ee338SJeff Johnson }
1458631ee338SJeff Johnson goto next_desc;
1459631ee338SJeff Johnson }
1460631ee338SJeff Johnson
146196b42732SPavankumar Nandeshwar ret = ath12k_wifi7_hal_desc_reo_parse_err(dp, reo_desc, &paddr,
14626b4954d3SPavankumar Nandeshwar &desc_bank);
14636b4954d3SPavankumar Nandeshwar if (ret) {
14646b4954d3SPavankumar Nandeshwar ath12k_warn(ab, "failed to parse error reo desc %d\n",
14656b4954d3SPavankumar Nandeshwar ret);
14666b4954d3SPavankumar Nandeshwar continue;
14676b4954d3SPavankumar Nandeshwar }
14686b4954d3SPavankumar Nandeshwar
146996b42732SPavankumar Nandeshwar pdev_idx = ath12k_hw_mac_id_to_pdev_id(partner_dp->hw_params,
14706b4954d3SPavankumar Nandeshwar hw_links[hw_link_id].pdev_idx);
14716b4954d3SPavankumar Nandeshwar
14723a52762bSRipan Deuri link_desc_banks = partner_dp->link_desc_banks;
14736b4954d3SPavankumar Nandeshwar link_desc_va = link_desc_banks[desc_bank].vaddr +
14746b4954d3SPavankumar Nandeshwar (paddr - link_desc_banks[desc_bank].paddr);
1475972f34d5SPavankumar Nandeshwar ath12k_wifi7_hal_rx_msdu_link_info_get(link_desc_va, &num_msdus,
1476972f34d5SPavankumar Nandeshwar msdu_cookies, &rbm);
14773a52762bSRipan Deuri if (rbm != partner_dp->idle_link_rbm &&
14786b4954d3SPavankumar Nandeshwar rbm != HAL_RX_BUF_RBM_SW3_BM &&
147996b42732SPavankumar Nandeshwar rbm != partner_dp->hal->hal_params->rx_buf_rbm) {
1480972f34d5SPavankumar Nandeshwar act = HAL_WBM_REL_BM_ACT_REL_MSDU;
1481775fe5acSPavankumar Nandeshwar dp->device_stats.invalid_rbm++;
14826b4954d3SPavankumar Nandeshwar ath12k_warn(ab, "invalid return buffer manager %d\n", rbm);
148396b42732SPavankumar Nandeshwar ath12k_wifi7_dp_rx_link_desc_return(partner_dp,
14846b4954d3SPavankumar Nandeshwar &reo_desc->buf_addr_info,
1485972f34d5SPavankumar Nandeshwar act);
14866b4954d3SPavankumar Nandeshwar continue;
14876b4954d3SPavankumar Nandeshwar }
14886b4954d3SPavankumar Nandeshwar
14896b4954d3SPavankumar Nandeshwar is_frag = !!(le32_to_cpu(reo_desc->rx_mpdu_info.info0) &
14906b4954d3SPavankumar Nandeshwar RX_MPDU_DESC_INFO0_FRAG_FLAG);
14916b4954d3SPavankumar Nandeshwar
14926b4954d3SPavankumar Nandeshwar /* Process only rx fragments with one msdu per link desc below, and drop
14936b4954d3SPavankumar Nandeshwar * msdu's indicated due to error reasons.
14946b4954d3SPavankumar Nandeshwar * Dynamic fragmentation not supported in Multi-link client, so drop the
14956b4954d3SPavankumar Nandeshwar * partner device buffers.
14966b4954d3SPavankumar Nandeshwar */
14976b4954d3SPavankumar Nandeshwar if (!is_frag || num_msdus > 1 ||
149896b42732SPavankumar Nandeshwar partner_dp->device_id != dp->device_id) {
14996b4954d3SPavankumar Nandeshwar drop = true;
1500972f34d5SPavankumar Nandeshwar act = HAL_WBM_REL_BM_ACT_PUT_IN_IDLE;
15016b4954d3SPavankumar Nandeshwar
15026b4954d3SPavankumar Nandeshwar /* Return the link desc back to wbm idle list */
150396b42732SPavankumar Nandeshwar ath12k_wifi7_dp_rx_link_desc_return(partner_dp,
15046b4954d3SPavankumar Nandeshwar &reo_desc->buf_addr_info,
1505972f34d5SPavankumar Nandeshwar act);
15066b4954d3SPavankumar Nandeshwar }
15076b4954d3SPavankumar Nandeshwar
15089e0ee04fSRipan Deuri rcu_read_lock();
15099e0ee04fSRipan Deuri
15109e0ee04fSRipan Deuri dp_pdev = ath12k_dp_to_pdev_dp(dp, pdev_idx);
15119e0ee04fSRipan Deuri if (!dp_pdev) {
15129e0ee04fSRipan Deuri rcu_read_unlock();
15139e0ee04fSRipan Deuri continue;
15149e0ee04fSRipan Deuri }
15159e0ee04fSRipan Deuri
15166b4954d3SPavankumar Nandeshwar for (i = 0; i < num_msdus; i++) {
1517972f34d5SPavankumar Nandeshwar used_list = &rx_desc_used_list[device_id];
1518972f34d5SPavankumar Nandeshwar
15199e0ee04fSRipan Deuri if (!ath12k_wifi7_dp_process_rx_err_buf(dp_pdev, reo_desc,
1520972f34d5SPavankumar Nandeshwar used_list,
15216b4954d3SPavankumar Nandeshwar drop,
15226b4954d3SPavankumar Nandeshwar msdu_cookies[i])) {
15236b4954d3SPavankumar Nandeshwar num_buffs_reaped[device_id]++;
15246b4954d3SPavankumar Nandeshwar tot_n_bufs_reaped++;
15256b4954d3SPavankumar Nandeshwar }
15266b4954d3SPavankumar Nandeshwar }
15276b4954d3SPavankumar Nandeshwar
15289e0ee04fSRipan Deuri rcu_read_unlock();
15299e0ee04fSRipan Deuri
1530631ee338SJeff Johnson next_desc:
15316b4954d3SPavankumar Nandeshwar if (tot_n_bufs_reaped >= quota) {
15326b4954d3SPavankumar Nandeshwar tot_n_bufs_reaped = quota;
15336b4954d3SPavankumar Nandeshwar goto exit;
15346b4954d3SPavankumar Nandeshwar }
15356b4954d3SPavankumar Nandeshwar
15366b4954d3SPavankumar Nandeshwar budget = quota - tot_n_bufs_reaped;
15376b4954d3SPavankumar Nandeshwar }
15386b4954d3SPavankumar Nandeshwar
15396b4954d3SPavankumar Nandeshwar exit:
15406b4954d3SPavankumar Nandeshwar ath12k_hal_srng_access_end(ab, srng);
15416b4954d3SPavankumar Nandeshwar
15426b4954d3SPavankumar Nandeshwar spin_unlock_bh(&srng->lock);
15436b4954d3SPavankumar Nandeshwar
15446b4954d3SPavankumar Nandeshwar for (device_id = 0; device_id < ATH12K_MAX_DEVICES; device_id++) {
15456b4954d3SPavankumar Nandeshwar if (!num_buffs_reaped[device_id])
15466b4954d3SPavankumar Nandeshwar continue;
15476b4954d3SPavankumar Nandeshwar
15488042e30aSRipan Deuri partner_dp = ath12k_dp_hw_grp_to_dp(dp_hw_grp, device_id);
15493a52762bSRipan Deuri rx_ring = &partner_dp->rx_refill_buf_ring;
15506b4954d3SPavankumar Nandeshwar
155196b42732SPavankumar Nandeshwar ath12k_dp_rx_bufs_replenish(partner_dp, rx_ring,
15526b4954d3SPavankumar Nandeshwar &rx_desc_used_list[device_id],
15536b4954d3SPavankumar Nandeshwar num_buffs_reaped[device_id]);
15546b4954d3SPavankumar Nandeshwar }
15556b4954d3SPavankumar Nandeshwar
15566b4954d3SPavankumar Nandeshwar return tot_n_bufs_reaped;
15576b4954d3SPavankumar Nandeshwar }
15586b4954d3SPavankumar Nandeshwar
1559972f34d5SPavankumar Nandeshwar static void
ath12k_wifi7_dp_rx_null_q_desc_sg_drop(struct ath12k_dp * dp,int msdu_len,struct sk_buff_head * msdu_list)15609e0ee04fSRipan Deuri ath12k_wifi7_dp_rx_null_q_desc_sg_drop(struct ath12k_dp *dp, int msdu_len,
15616b4954d3SPavankumar Nandeshwar struct sk_buff_head *msdu_list)
15626b4954d3SPavankumar Nandeshwar {
15636b4954d3SPavankumar Nandeshwar struct sk_buff *skb, *tmp;
15646b4954d3SPavankumar Nandeshwar struct ath12k_skb_rxcb *rxcb;
15656b4954d3SPavankumar Nandeshwar int n_buffs;
15666b4954d3SPavankumar Nandeshwar
15676b4954d3SPavankumar Nandeshwar n_buffs = DIV_ROUND_UP(msdu_len,
15689e0ee04fSRipan Deuri (DP_RX_BUFFER_SIZE - dp->ab->hal.hal_desc_sz));
15696b4954d3SPavankumar Nandeshwar
15706b4954d3SPavankumar Nandeshwar skb_queue_walk_safe(msdu_list, skb, tmp) {
15716b4954d3SPavankumar Nandeshwar rxcb = ATH12K_SKB_RXCB(skb);
15726b4954d3SPavankumar Nandeshwar if (rxcb->err_rel_src == HAL_WBM_REL_SRC_MODULE_REO &&
15736b4954d3SPavankumar Nandeshwar rxcb->err_code == HAL_REO_DEST_RING_ERROR_CODE_DESC_ADDR_ZERO) {
15746b4954d3SPavankumar Nandeshwar if (!n_buffs)
15756b4954d3SPavankumar Nandeshwar break;
15766b4954d3SPavankumar Nandeshwar __skb_unlink(skb, msdu_list);
15776b4954d3SPavankumar Nandeshwar dev_kfree_skb_any(skb);
15786b4954d3SPavankumar Nandeshwar n_buffs--;
15796b4954d3SPavankumar Nandeshwar }
15806b4954d3SPavankumar Nandeshwar }
15816b4954d3SPavankumar Nandeshwar }
15826b4954d3SPavankumar Nandeshwar
/* ath12k_wifi7_dp_rx_h_null_q_desc() - handle an MSDU released with the REO
 * "NULL queue descriptor" (DESC_ADDR_ZERO) error.
 *
 * Validates the frame, trims the skb down to its payload and runs the
 * normal ppdu/mpdu receive processing on it so the caller can deliver it.
 *
 * Return: 0 when @msdu is ready for delivery, negative errno when the
 * caller must drop it (-EIO additionally purges @msdu_list).
 */
static int ath12k_wifi7_dp_rx_h_null_q_desc(struct ath12k_pdev_dp *dp_pdev,
					    struct sk_buff *msdu,
					    struct hal_rx_desc_data *rx_info,
					    struct sk_buff_head *msdu_list)
{
	struct ath12k_dp *dp = dp_pdev->dp;
	struct ath12k_base *ab = dp->ab;
	u16 msdu_len = rx_info->msdu_len;
	struct hal_rx_desc *desc = (struct hal_rx_desc *)msdu->data;
	u8 l3pad_bytes = rx_info->l3_pad_bytes;
	struct ath12k_skb_rxcb *rxcb = ATH12K_SKB_RXCB(msdu);
	u32 hal_rx_desc_sz = dp->ab->hal.hal_desc_sz;

	/* Non-fragment frame that does not fit into one RX buffer must be a
	 * scatter-gather frame; its continuation buffers are still sitting in
	 * @msdu_list and need to be dropped along with this head buffer.
	 */
	if (!rxcb->is_frag && ((msdu_len + hal_rx_desc_sz) > DP_RX_BUFFER_SIZE)) {
		/* First buffer will be freed by the caller, so deduct its length */
		msdu_len = msdu_len - (DP_RX_BUFFER_SIZE - hal_rx_desc_sz);
		ath12k_wifi7_dp_rx_null_q_desc_sg_drop(dp, msdu_len, msdu_list);
		return -EINVAL;
	}

	/* Even after cleaning up the sg buffers in the msdu list with above check
	 * any msdu received with continuation flag needs to be dropped as invalid.
	 * This protects against some random err frame with continuation flag.
	 */
	if (rxcb->is_continuation)
		return -EINVAL;

	if (!rx_info->msdu_done) {
		ath12k_warn(ab,
			    "msdu_done bit not set in null_q_des processing\n");
		__skb_queue_purge(msdu_list);
		return -EIO;
	}

	/* Handle NULL queue descriptor violations arising out a missing
	 * REO queue for a given peer or a given TID. This typically
	 * may happen if a packet is received on a QOS enabled TID before the
	 * ADDBA negotiation for that TID, when the TID queue is setup. Or
	 * it may also happen for MC/BC frames if they are not routed to the
	 * non-QOS TID queue, in the absence of any other default TID queue.
	 * This error can show up both in a REO destination or WBM release ring.
	 */

	if (rxcb->is_frag) {
		/* Fragments carry no L3 pad; just strip the HAL descriptor */
		skb_pull(msdu, hal_rx_desc_sz);
	} else {
		if ((hal_rx_desc_sz + l3pad_bytes + msdu_len) > DP_RX_BUFFER_SIZE)
			return -EINVAL;

		/* Grow to cover desc + pad + payload, then strip desc + pad */
		skb_put(msdu, hal_rx_desc_sz + l3pad_bytes + msdu_len);
		skb_pull(msdu, hal_rx_desc_sz + l3pad_bytes);
	}
	if (unlikely(!ath12k_dp_rx_check_nwifi_hdr_len_valid(dp, desc, msdu, rx_info)))
		return -EINVAL;

	ath12k_dp_rx_h_ppdu(dp_pdev, rx_info);
	ath12k_wifi7_dp_rx_h_mpdu(dp_pdev, msdu, desc, rx_info);

	rxcb->tid = rx_info->tid;

	/* Note: the caller retains access to msdu and completes rx with
	 * mac80211; no need to clean up amsdu_list here.
	 */

	return 0;
}
16491a6a4b6cSPavankumar Nandeshwar
ath12k_wifi7_dp_rx_h_tkip_mic_err(struct ath12k_pdev_dp * dp_pdev,struct sk_buff * msdu,struct hal_rx_desc_data * rx_info)16509e0ee04fSRipan Deuri static bool ath12k_wifi7_dp_rx_h_tkip_mic_err(struct ath12k_pdev_dp *dp_pdev,
16519e0ee04fSRipan Deuri struct sk_buff *msdu,
1652e8a1e49cSPavankumar Nandeshwar struct hal_rx_desc_data *rx_info)
16531a6a4b6cSPavankumar Nandeshwar {
16549e0ee04fSRipan Deuri struct ath12k_dp *dp = dp_pdev->dp;
16559e0ee04fSRipan Deuri struct ath12k_base *ab = dp->ab;
1656e8a1e49cSPavankumar Nandeshwar u16 msdu_len = rx_info->msdu_len;
16571a6a4b6cSPavankumar Nandeshwar struct hal_rx_desc *desc = (struct hal_rx_desc *)msdu->data;
1658e8a1e49cSPavankumar Nandeshwar u8 l3pad_bytes = rx_info->l3_pad_bytes;
16591a6a4b6cSPavankumar Nandeshwar struct ath12k_skb_rxcb *rxcb = ATH12K_SKB_RXCB(msdu);
16609e0ee04fSRipan Deuri u32 hal_rx_desc_sz = ab->hal.hal_desc_sz;
16611a6a4b6cSPavankumar Nandeshwar
1662e8a1e49cSPavankumar Nandeshwar rxcb->is_first_msdu = rx_info->is_first_msdu;
1663e8a1e49cSPavankumar Nandeshwar rxcb->is_last_msdu = rx_info->is_last_msdu;
16641a6a4b6cSPavankumar Nandeshwar
16651a6a4b6cSPavankumar Nandeshwar if ((hal_rx_desc_sz + l3pad_bytes + msdu_len) > DP_RX_BUFFER_SIZE) {
16661a6a4b6cSPavankumar Nandeshwar ath12k_dbg(ab, ATH12K_DBG_DATA,
16671a6a4b6cSPavankumar Nandeshwar "invalid msdu len in tkip mic err %u\n", msdu_len);
16681a6a4b6cSPavankumar Nandeshwar ath12k_dbg_dump(ab, ATH12K_DBG_DATA, NULL, "", desc,
16691a6a4b6cSPavankumar Nandeshwar sizeof(*desc));
16701a6a4b6cSPavankumar Nandeshwar return true;
16711a6a4b6cSPavankumar Nandeshwar }
16721a6a4b6cSPavankumar Nandeshwar
16731a6a4b6cSPavankumar Nandeshwar skb_put(msdu, hal_rx_desc_sz + l3pad_bytes + msdu_len);
16741a6a4b6cSPavankumar Nandeshwar skb_pull(msdu, hal_rx_desc_sz + l3pad_bytes);
16751a6a4b6cSPavankumar Nandeshwar
1676775fe5acSPavankumar Nandeshwar if (unlikely(!ath12k_dp_rx_check_nwifi_hdr_len_valid(dp, desc, msdu, rx_info)))
16771a6a4b6cSPavankumar Nandeshwar return true;
16781a6a4b6cSPavankumar Nandeshwar
16799e0ee04fSRipan Deuri ath12k_dp_rx_h_ppdu(dp_pdev, rx_info);
16801a6a4b6cSPavankumar Nandeshwar
16811a6a4b6cSPavankumar Nandeshwar rx_info->rx_status->flag |= (RX_FLAG_MMIC_STRIPPED | RX_FLAG_MMIC_ERROR |
16821a6a4b6cSPavankumar Nandeshwar RX_FLAG_DECRYPTED);
16831a6a4b6cSPavankumar Nandeshwar
16849e0ee04fSRipan Deuri ath12k_dp_rx_h_undecap(dp_pdev, msdu, desc,
1685e8a1e49cSPavankumar Nandeshwar HAL_ENCRYPT_TYPE_TKIP_MIC, false, rx_info);
16861a6a4b6cSPavankumar Nandeshwar return false;
16871a6a4b6cSPavankumar Nandeshwar }
16881a6a4b6cSPavankumar Nandeshwar
ath12k_wifi7_dp_rx_h_rxdma_err(struct ath12k_pdev_dp * dp_pdev,struct sk_buff * msdu,struct hal_rx_desc_data * rx_info)16899e0ee04fSRipan Deuri static bool ath12k_wifi7_dp_rx_h_rxdma_err(struct ath12k_pdev_dp *dp_pdev,
16909e0ee04fSRipan Deuri struct sk_buff *msdu,
1691e8a1e49cSPavankumar Nandeshwar struct hal_rx_desc_data *rx_info)
16921a6a4b6cSPavankumar Nandeshwar {
16939e0ee04fSRipan Deuri struct ath12k_dp *dp = dp_pdev->dp;
16941a6a4b6cSPavankumar Nandeshwar struct ath12k_skb_rxcb *rxcb = ATH12K_SKB_RXCB(msdu);
16951a6a4b6cSPavankumar Nandeshwar bool drop = false;
16961a6a4b6cSPavankumar Nandeshwar
1697775fe5acSPavankumar Nandeshwar dp->device_stats.rxdma_error[rxcb->err_code]++;
16981a6a4b6cSPavankumar Nandeshwar
16991a6a4b6cSPavankumar Nandeshwar switch (rxcb->err_code) {
17001a6a4b6cSPavankumar Nandeshwar case HAL_REO_ENTR_RING_RXDMA_ECODE_DECRYPT_ERR:
17011a6a4b6cSPavankumar Nandeshwar case HAL_REO_ENTR_RING_RXDMA_ECODE_TKIP_MIC_ERR:
1702e8a1e49cSPavankumar Nandeshwar if (rx_info->err_bitmap & HAL_RX_MPDU_ERR_TKIP_MIC) {
17039e0ee04fSRipan Deuri drop = ath12k_wifi7_dp_rx_h_tkip_mic_err(dp_pdev, msdu, rx_info);
17041a6a4b6cSPavankumar Nandeshwar break;
17051a6a4b6cSPavankumar Nandeshwar }
17061a6a4b6cSPavankumar Nandeshwar fallthrough;
17071a6a4b6cSPavankumar Nandeshwar default:
17081a6a4b6cSPavankumar Nandeshwar /* TODO: Review other rxdma error code to check if anything is
17091a6a4b6cSPavankumar Nandeshwar * worth reporting to mac80211
17101a6a4b6cSPavankumar Nandeshwar */
17111a6a4b6cSPavankumar Nandeshwar drop = true;
17121a6a4b6cSPavankumar Nandeshwar break;
17131a6a4b6cSPavankumar Nandeshwar }
17141a6a4b6cSPavankumar Nandeshwar
17151a6a4b6cSPavankumar Nandeshwar return drop;
17161a6a4b6cSPavankumar Nandeshwar }
17171a6a4b6cSPavankumar Nandeshwar
ath12k_wifi7_dp_rx_h_reo_err(struct ath12k_pdev_dp * dp_pdev,struct sk_buff * msdu,struct hal_rx_desc_data * rx_info,struct sk_buff_head * msdu_list)17189e0ee04fSRipan Deuri static bool ath12k_wifi7_dp_rx_h_reo_err(struct ath12k_pdev_dp *dp_pdev,
17199e0ee04fSRipan Deuri struct sk_buff *msdu,
1720e8a1e49cSPavankumar Nandeshwar struct hal_rx_desc_data *rx_info,
17211a6a4b6cSPavankumar Nandeshwar struct sk_buff_head *msdu_list)
17221a6a4b6cSPavankumar Nandeshwar {
17239e0ee04fSRipan Deuri struct ath12k_dp *dp = dp_pdev->dp;
17241a6a4b6cSPavankumar Nandeshwar struct ath12k_skb_rxcb *rxcb = ATH12K_SKB_RXCB(msdu);
17251a6a4b6cSPavankumar Nandeshwar bool drop = false;
17261a6a4b6cSPavankumar Nandeshwar
1727775fe5acSPavankumar Nandeshwar dp->device_stats.reo_error[rxcb->err_code]++;
17281a6a4b6cSPavankumar Nandeshwar
17291a6a4b6cSPavankumar Nandeshwar switch (rxcb->err_code) {
17301a6a4b6cSPavankumar Nandeshwar case HAL_REO_DEST_RING_ERROR_CODE_DESC_ADDR_ZERO:
17319e0ee04fSRipan Deuri if (ath12k_wifi7_dp_rx_h_null_q_desc(dp_pdev, msdu, rx_info, msdu_list))
17321a6a4b6cSPavankumar Nandeshwar drop = true;
17331a6a4b6cSPavankumar Nandeshwar break;
17341a6a4b6cSPavankumar Nandeshwar case HAL_REO_DEST_RING_ERROR_CODE_PN_CHECK_FAILED:
17351a6a4b6cSPavankumar Nandeshwar /* TODO: Do not drop PN failed packets in the driver;
17361a6a4b6cSPavankumar Nandeshwar * instead, it is good to drop such packets in mac80211
17371a6a4b6cSPavankumar Nandeshwar * after incrementing the replay counters.
17381a6a4b6cSPavankumar Nandeshwar */
17391a6a4b6cSPavankumar Nandeshwar fallthrough;
17401a6a4b6cSPavankumar Nandeshwar default:
17411a6a4b6cSPavankumar Nandeshwar /* TODO: Review other errors and process them to mac80211
17421a6a4b6cSPavankumar Nandeshwar * as appropriate.
17431a6a4b6cSPavankumar Nandeshwar */
17441a6a4b6cSPavankumar Nandeshwar drop = true;
17451a6a4b6cSPavankumar Nandeshwar break;
17461a6a4b6cSPavankumar Nandeshwar }
17471a6a4b6cSPavankumar Nandeshwar
17481a6a4b6cSPavankumar Nandeshwar return drop;
17491a6a4b6cSPavankumar Nandeshwar }
17501a6a4b6cSPavankumar Nandeshwar
ath12k_wifi7_dp_rx_wbm_err(struct ath12k_pdev_dp * dp_pdev,struct napi_struct * napi,struct sk_buff * msdu,struct sk_buff_head * msdu_list)17519e0ee04fSRipan Deuri static void ath12k_wifi7_dp_rx_wbm_err(struct ath12k_pdev_dp *dp_pdev,
17521a6a4b6cSPavankumar Nandeshwar struct napi_struct *napi,
17531a6a4b6cSPavankumar Nandeshwar struct sk_buff *msdu,
17541a6a4b6cSPavankumar Nandeshwar struct sk_buff_head *msdu_list)
17551a6a4b6cSPavankumar Nandeshwar {
17569e0ee04fSRipan Deuri struct ath12k_dp *dp = dp_pdev->dp;
1757e8a1e49cSPavankumar Nandeshwar struct hal_rx_desc *rx_desc = (struct hal_rx_desc *)msdu->data;
17581a6a4b6cSPavankumar Nandeshwar struct ath12k_skb_rxcb *rxcb = ATH12K_SKB_RXCB(msdu);
17591a6a4b6cSPavankumar Nandeshwar struct ieee80211_rx_status rxs = {};
1760e8a1e49cSPavankumar Nandeshwar struct hal_rx_desc_data rx_info;
17611a6a4b6cSPavankumar Nandeshwar bool drop = true;
17621a6a4b6cSPavankumar Nandeshwar
17631a6a4b6cSPavankumar Nandeshwar rx_info.addr2_present = false;
17641a6a4b6cSPavankumar Nandeshwar rx_info.rx_status = &rxs;
17651a6a4b6cSPavankumar Nandeshwar
17667cd7392aSAlok Singh ath12k_dp_extract_rx_desc_data(dp->hal, &rx_info, rx_desc, rx_desc);
1767e8a1e49cSPavankumar Nandeshwar
17681a6a4b6cSPavankumar Nandeshwar switch (rxcb->err_rel_src) {
17691a6a4b6cSPavankumar Nandeshwar case HAL_WBM_REL_SRC_MODULE_REO:
17709e0ee04fSRipan Deuri drop = ath12k_wifi7_dp_rx_h_reo_err(dp_pdev, msdu, &rx_info, msdu_list);
17711a6a4b6cSPavankumar Nandeshwar break;
17721a6a4b6cSPavankumar Nandeshwar case HAL_WBM_REL_SRC_MODULE_RXDMA:
17739e0ee04fSRipan Deuri drop = ath12k_wifi7_dp_rx_h_rxdma_err(dp_pdev, msdu, &rx_info);
17741a6a4b6cSPavankumar Nandeshwar break;
17751a6a4b6cSPavankumar Nandeshwar default:
17761a6a4b6cSPavankumar Nandeshwar /* msdu will get freed */
17771a6a4b6cSPavankumar Nandeshwar break;
17781a6a4b6cSPavankumar Nandeshwar }
17791a6a4b6cSPavankumar Nandeshwar
17801a6a4b6cSPavankumar Nandeshwar if (drop) {
17811a6a4b6cSPavankumar Nandeshwar dev_kfree_skb_any(msdu);
17821a6a4b6cSPavankumar Nandeshwar return;
17831a6a4b6cSPavankumar Nandeshwar }
17841a6a4b6cSPavankumar Nandeshwar
17851a6a4b6cSPavankumar Nandeshwar rx_info.rx_status->flag |= RX_FLAG_SKIP_MONITOR;
17861a6a4b6cSPavankumar Nandeshwar
17879e0ee04fSRipan Deuri ath12k_dp_rx_deliver_msdu(dp_pdev, napi, msdu, &rx_info);
17881a6a4b6cSPavankumar Nandeshwar }
17891a6a4b6cSPavankumar Nandeshwar
/* Populate a REO command that enables/updates PN (packet number) checking
 * on the RX TID queue described by @rx_tid.
 *
 * The PN-check enable bit and 48-bit PN size are only set when a key is
 * being installed (SET_KEY) for a cipher that uses replay protection;
 * for any other cipher or a key removal only the update masks and the
 * queue address are filled in.
 */
void ath12k_wifi7_dp_setup_pn_check_reo_cmd(struct ath12k_hal_reo_cmd *cmd,
					    struct ath12k_dp_rx_tid *rx_tid,
					    u32 cipher, enum set_key_cmd key_cmd)
{
	cmd->flag = HAL_REO_CMD_FLG_NEED_STATUS;
	cmd->upd0 = HAL_REO_CMD_UPD0_PN |
		    HAL_REO_CMD_UPD0_PN_SIZE |
		    HAL_REO_CMD_UPD0_PN_VALID |
		    HAL_REO_CMD_UPD0_PN_CHECK |
		    HAL_REO_CMD_UPD0_SVLD;

	if (key_cmd == SET_KEY) {
		switch (cipher) {
		case WLAN_CIPHER_SUITE_TKIP:
		case WLAN_CIPHER_SUITE_CCMP:
		case WLAN_CIPHER_SUITE_CCMP_256:
		case WLAN_CIPHER_SUITE_GCMP:
		case WLAN_CIPHER_SUITE_GCMP_256:
			cmd->upd1 |= HAL_REO_CMD_UPD1_PN_CHECK;
			cmd->pn_size = 48;
			break;
		default:
			break;
		}
	}

	cmd->addr_lo = lower_32_bits(rx_tid->qbuf.paddr_aligned);
	cmd->addr_hi = upper_32_bits(rx_tid->qbuf.paddr_aligned);
}
18196c7ceff2SPavankumar Nandeshwar
ath12k_wifi7_dp_rx_process_wbm_err(struct ath12k_dp * dp,struct napi_struct * napi,int budget)182096b42732SPavankumar Nandeshwar int ath12k_wifi7_dp_rx_process_wbm_err(struct ath12k_dp *dp,
18211a6a4b6cSPavankumar Nandeshwar struct napi_struct *napi, int budget)
18221a6a4b6cSPavankumar Nandeshwar {
18231a6a4b6cSPavankumar Nandeshwar struct list_head rx_desc_used_list[ATH12K_MAX_DEVICES];
182496b42732SPavankumar Nandeshwar struct ath12k_base *ab = dp->ab;
182596b42732SPavankumar Nandeshwar struct ath12k_hal *hal = dp->hal;
18261a6a4b6cSPavankumar Nandeshwar struct ath12k *ar;
18279e0ee04fSRipan Deuri struct ath12k_pdev_dp *dp_pdev;
18288042e30aSRipan Deuri struct ath12k_hw_group *ag = dp->ag;
18298042e30aSRipan Deuri struct ath12k_dp_hw_group *dp_hw_grp = &ag->dp_hw_grp;
18303a52762bSRipan Deuri struct ath12k_dp *partner_dp;
18311a6a4b6cSPavankumar Nandeshwar struct dp_rxdma_ring *rx_ring;
18321a6a4b6cSPavankumar Nandeshwar struct hal_rx_wbm_rel_info err_info;
18331a6a4b6cSPavankumar Nandeshwar struct hal_srng *srng;
18341a6a4b6cSPavankumar Nandeshwar struct sk_buff *msdu;
18351a6a4b6cSPavankumar Nandeshwar struct sk_buff_head msdu_list, scatter_msdu_list;
18361a6a4b6cSPavankumar Nandeshwar struct ath12k_skb_rxcb *rxcb;
18371a6a4b6cSPavankumar Nandeshwar void *rx_desc;
18381a6a4b6cSPavankumar Nandeshwar int num_buffs_reaped[ATH12K_MAX_DEVICES] = {};
18391a6a4b6cSPavankumar Nandeshwar int total_num_buffs_reaped = 0;
18401a6a4b6cSPavankumar Nandeshwar struct ath12k_rx_desc_info *desc_info;
1841775fe5acSPavankumar Nandeshwar struct ath12k_device_dp_stats *device_stats = &dp->device_stats;
18421a6a4b6cSPavankumar Nandeshwar struct ath12k_hw_link *hw_links = ag->hw_links;
18431a6a4b6cSPavankumar Nandeshwar u8 hw_link_id, device_id;
18449e0ee04fSRipan Deuri int ret, pdev_idx;
18451a6a4b6cSPavankumar Nandeshwar struct hal_rx_desc *msdu_data;
18461a6a4b6cSPavankumar Nandeshwar
18471a6a4b6cSPavankumar Nandeshwar __skb_queue_head_init(&msdu_list);
18481a6a4b6cSPavankumar Nandeshwar __skb_queue_head_init(&scatter_msdu_list);
18491a6a4b6cSPavankumar Nandeshwar
18501a6a4b6cSPavankumar Nandeshwar for (device_id = 0; device_id < ATH12K_MAX_DEVICES; device_id++)
18511a6a4b6cSPavankumar Nandeshwar INIT_LIST_HEAD(&rx_desc_used_list[device_id]);
18521a6a4b6cSPavankumar Nandeshwar
185396b42732SPavankumar Nandeshwar srng = &hal->srng_list[dp->rx_rel_ring.ring_id];
18541a6a4b6cSPavankumar Nandeshwar spin_lock_bh(&srng->lock);
18551a6a4b6cSPavankumar Nandeshwar
18561a6a4b6cSPavankumar Nandeshwar ath12k_hal_srng_access_begin(ab, srng);
18571a6a4b6cSPavankumar Nandeshwar
18581a6a4b6cSPavankumar Nandeshwar while (budget) {
18591a6a4b6cSPavankumar Nandeshwar rx_desc = ath12k_hal_srng_dst_get_next_entry(ab, srng);
18601a6a4b6cSPavankumar Nandeshwar if (!rx_desc)
18611a6a4b6cSPavankumar Nandeshwar break;
18621a6a4b6cSPavankumar Nandeshwar
186396b42732SPavankumar Nandeshwar ret = ath12k_wifi7_hal_wbm_desc_parse_err(dp, rx_desc,
1864972f34d5SPavankumar Nandeshwar &err_info);
18651a6a4b6cSPavankumar Nandeshwar if (ret) {
186696b42732SPavankumar Nandeshwar ath12k_warn(ab, "failed to parse rx error in wbm_rel ring desc %d\n",
18671a6a4b6cSPavankumar Nandeshwar ret);
18681a6a4b6cSPavankumar Nandeshwar continue;
18691a6a4b6cSPavankumar Nandeshwar }
18701a6a4b6cSPavankumar Nandeshwar
18711a6a4b6cSPavankumar Nandeshwar desc_info = err_info.rx_desc;
18721a6a4b6cSPavankumar Nandeshwar
18731a6a4b6cSPavankumar Nandeshwar /* retry manual desc retrieval if hw cc is not done */
18741a6a4b6cSPavankumar Nandeshwar if (!desc_info) {
187596b42732SPavankumar Nandeshwar desc_info = ath12k_dp_get_rx_desc(dp, err_info.cookie);
18761a6a4b6cSPavankumar Nandeshwar if (!desc_info) {
18771a6a4b6cSPavankumar Nandeshwar ath12k_warn(ab, "Invalid cookie in DP WBM rx error descriptor retrieval: 0x%x\n",
18781a6a4b6cSPavankumar Nandeshwar err_info.cookie);
18791a6a4b6cSPavankumar Nandeshwar continue;
18801a6a4b6cSPavankumar Nandeshwar }
18811a6a4b6cSPavankumar Nandeshwar }
18821a6a4b6cSPavankumar Nandeshwar
18831a6a4b6cSPavankumar Nandeshwar if (desc_info->magic != ATH12K_DP_RX_DESC_MAGIC)
18841a6a4b6cSPavankumar Nandeshwar ath12k_warn(ab, "WBM RX err, Check HW CC implementation");
18851a6a4b6cSPavankumar Nandeshwar
18861a6a4b6cSPavankumar Nandeshwar msdu = desc_info->skb;
18871a6a4b6cSPavankumar Nandeshwar desc_info->skb = NULL;
18881a6a4b6cSPavankumar Nandeshwar
18891a6a4b6cSPavankumar Nandeshwar device_id = desc_info->device_id;
18908042e30aSRipan Deuri partner_dp = ath12k_dp_hw_grp_to_dp(dp_hw_grp, device_id);
18918042e30aSRipan Deuri if (unlikely(!partner_dp)) {
18921a6a4b6cSPavankumar Nandeshwar dev_kfree_skb_any(msdu);
18931a6a4b6cSPavankumar Nandeshwar
18941a6a4b6cSPavankumar Nandeshwar /* In any case continuation bit is set
18951a6a4b6cSPavankumar Nandeshwar * in the previous record, cleanup scatter_msdu_list
18961a6a4b6cSPavankumar Nandeshwar */
18971a6a4b6cSPavankumar Nandeshwar ath12k_dp_clean_up_skb_list(&scatter_msdu_list);
18981a6a4b6cSPavankumar Nandeshwar continue;
18991a6a4b6cSPavankumar Nandeshwar }
19001a6a4b6cSPavankumar Nandeshwar
19011a6a4b6cSPavankumar Nandeshwar list_add_tail(&desc_info->list, &rx_desc_used_list[device_id]);
19021a6a4b6cSPavankumar Nandeshwar
19031a6a4b6cSPavankumar Nandeshwar rxcb = ATH12K_SKB_RXCB(msdu);
19048042e30aSRipan Deuri dma_unmap_single(partner_dp->dev, rxcb->paddr,
19051a6a4b6cSPavankumar Nandeshwar msdu->len + skb_tailroom(msdu),
19061a6a4b6cSPavankumar Nandeshwar DMA_FROM_DEVICE);
19071a6a4b6cSPavankumar Nandeshwar
19081a6a4b6cSPavankumar Nandeshwar num_buffs_reaped[device_id]++;
19091a6a4b6cSPavankumar Nandeshwar total_num_buffs_reaped++;
19101a6a4b6cSPavankumar Nandeshwar
19111a6a4b6cSPavankumar Nandeshwar if (!err_info.continuation)
19121a6a4b6cSPavankumar Nandeshwar budget--;
19131a6a4b6cSPavankumar Nandeshwar
19141a6a4b6cSPavankumar Nandeshwar if (err_info.push_reason !=
19151a6a4b6cSPavankumar Nandeshwar HAL_REO_DEST_RING_PUSH_REASON_ERR_DETECTED) {
19161a6a4b6cSPavankumar Nandeshwar dev_kfree_skb_any(msdu);
19171a6a4b6cSPavankumar Nandeshwar continue;
19181a6a4b6cSPavankumar Nandeshwar }
19191a6a4b6cSPavankumar Nandeshwar
19201a6a4b6cSPavankumar Nandeshwar msdu_data = (struct hal_rx_desc *)msdu->data;
19211a6a4b6cSPavankumar Nandeshwar rxcb->err_rel_src = err_info.err_rel_src;
19221a6a4b6cSPavankumar Nandeshwar rxcb->err_code = err_info.err_code;
19231a6a4b6cSPavankumar Nandeshwar rxcb->is_first_msdu = err_info.first_msdu;
19241a6a4b6cSPavankumar Nandeshwar rxcb->is_last_msdu = err_info.last_msdu;
19251a6a4b6cSPavankumar Nandeshwar rxcb->is_continuation = err_info.continuation;
19261a6a4b6cSPavankumar Nandeshwar rxcb->rx_desc = msdu_data;
1927147daefcSPavankumar Nandeshwar rxcb->peer_id = ath12k_wifi7_dp_rx_get_peer_id(dp, dp->peer_metadata_ver,
192811157e09SHarsh Kumar Bijlani err_info.peer_metadata);
19291a6a4b6cSPavankumar Nandeshwar
19301a6a4b6cSPavankumar Nandeshwar if (err_info.continuation) {
19311a6a4b6cSPavankumar Nandeshwar __skb_queue_tail(&scatter_msdu_list, msdu);
19321a6a4b6cSPavankumar Nandeshwar continue;
19331a6a4b6cSPavankumar Nandeshwar }
19341a6a4b6cSPavankumar Nandeshwar
193596b42732SPavankumar Nandeshwar hw_link_id = ath12k_dp_rx_get_msdu_src_link(partner_dp->hal,
19361a6a4b6cSPavankumar Nandeshwar msdu_data);
19371a6a4b6cSPavankumar Nandeshwar if (hw_link_id >= ATH12K_GROUP_MAX_RADIO) {
19381a6a4b6cSPavankumar Nandeshwar dev_kfree_skb_any(msdu);
19391a6a4b6cSPavankumar Nandeshwar
19401a6a4b6cSPavankumar Nandeshwar /* In any case continuation bit is set
19411a6a4b6cSPavankumar Nandeshwar * in the previous record, cleanup scatter_msdu_list
19421a6a4b6cSPavankumar Nandeshwar */
19431a6a4b6cSPavankumar Nandeshwar ath12k_dp_clean_up_skb_list(&scatter_msdu_list);
19441a6a4b6cSPavankumar Nandeshwar continue;
19451a6a4b6cSPavankumar Nandeshwar }
19461a6a4b6cSPavankumar Nandeshwar
19471a6a4b6cSPavankumar Nandeshwar if (!skb_queue_empty(&scatter_msdu_list)) {
19481a6a4b6cSPavankumar Nandeshwar struct sk_buff *msdu;
19491a6a4b6cSPavankumar Nandeshwar
19501a6a4b6cSPavankumar Nandeshwar skb_queue_walk(&scatter_msdu_list, msdu) {
19511a6a4b6cSPavankumar Nandeshwar rxcb = ATH12K_SKB_RXCB(msdu);
19521a6a4b6cSPavankumar Nandeshwar rxcb->hw_link_id = hw_link_id;
19531a6a4b6cSPavankumar Nandeshwar }
19541a6a4b6cSPavankumar Nandeshwar
19551a6a4b6cSPavankumar Nandeshwar skb_queue_splice_tail_init(&scatter_msdu_list,
19561a6a4b6cSPavankumar Nandeshwar &msdu_list);
19571a6a4b6cSPavankumar Nandeshwar }
19581a6a4b6cSPavankumar Nandeshwar
19591a6a4b6cSPavankumar Nandeshwar rxcb = ATH12K_SKB_RXCB(msdu);
19601a6a4b6cSPavankumar Nandeshwar rxcb->hw_link_id = hw_link_id;
19611a6a4b6cSPavankumar Nandeshwar __skb_queue_tail(&msdu_list, msdu);
19621a6a4b6cSPavankumar Nandeshwar }
19631a6a4b6cSPavankumar Nandeshwar
19641a6a4b6cSPavankumar Nandeshwar /* In any case continuation bit is set in the
19651a6a4b6cSPavankumar Nandeshwar * last record, cleanup scatter_msdu_list
19661a6a4b6cSPavankumar Nandeshwar */
19671a6a4b6cSPavankumar Nandeshwar ath12k_dp_clean_up_skb_list(&scatter_msdu_list);
19681a6a4b6cSPavankumar Nandeshwar
19691a6a4b6cSPavankumar Nandeshwar ath12k_hal_srng_access_end(ab, srng);
19701a6a4b6cSPavankumar Nandeshwar
19711a6a4b6cSPavankumar Nandeshwar spin_unlock_bh(&srng->lock);
19721a6a4b6cSPavankumar Nandeshwar
19731a6a4b6cSPavankumar Nandeshwar if (!total_num_buffs_reaped)
19741a6a4b6cSPavankumar Nandeshwar goto done;
19751a6a4b6cSPavankumar Nandeshwar
19761a6a4b6cSPavankumar Nandeshwar for (device_id = 0; device_id < ATH12K_MAX_DEVICES; device_id++) {
19771a6a4b6cSPavankumar Nandeshwar if (!num_buffs_reaped[device_id])
19781a6a4b6cSPavankumar Nandeshwar continue;
19791a6a4b6cSPavankumar Nandeshwar
19808042e30aSRipan Deuri partner_dp = ath12k_dp_hw_grp_to_dp(dp_hw_grp, device_id);
19813a52762bSRipan Deuri rx_ring = &partner_dp->rx_refill_buf_ring;
19821a6a4b6cSPavankumar Nandeshwar
198396b42732SPavankumar Nandeshwar ath12k_dp_rx_bufs_replenish(dp, rx_ring,
19841a6a4b6cSPavankumar Nandeshwar &rx_desc_used_list[device_id],
19851a6a4b6cSPavankumar Nandeshwar num_buffs_reaped[device_id]);
19861a6a4b6cSPavankumar Nandeshwar }
19871a6a4b6cSPavankumar Nandeshwar
19881a6a4b6cSPavankumar Nandeshwar rcu_read_lock();
19891a6a4b6cSPavankumar Nandeshwar while ((msdu = __skb_dequeue(&msdu_list))) {
19901a6a4b6cSPavankumar Nandeshwar rxcb = ATH12K_SKB_RXCB(msdu);
19911a6a4b6cSPavankumar Nandeshwar hw_link_id = rxcb->hw_link_id;
19921a6a4b6cSPavankumar Nandeshwar
19931a6a4b6cSPavankumar Nandeshwar device_id = hw_links[hw_link_id].device_id;
19948042e30aSRipan Deuri partner_dp = ath12k_dp_hw_grp_to_dp(dp_hw_grp, device_id);
19958042e30aSRipan Deuri if (unlikely(!partner_dp)) {
19961a6a4b6cSPavankumar Nandeshwar ath12k_dbg(ab, ATH12K_DBG_DATA,
19971a6a4b6cSPavankumar Nandeshwar "Unable to process WBM error msdu due to invalid hw link id %d device id %d\n",
19981a6a4b6cSPavankumar Nandeshwar hw_link_id, device_id);
19991a6a4b6cSPavankumar Nandeshwar dev_kfree_skb_any(msdu);
20001a6a4b6cSPavankumar Nandeshwar continue;
20011a6a4b6cSPavankumar Nandeshwar }
20021a6a4b6cSPavankumar Nandeshwar
20039e0ee04fSRipan Deuri pdev_idx = ath12k_hw_mac_id_to_pdev_id(partner_dp->hw_params,
20041a6a4b6cSPavankumar Nandeshwar hw_links[hw_link_id].pdev_idx);
20051a6a4b6cSPavankumar Nandeshwar
20069e0ee04fSRipan Deuri dp_pdev = ath12k_dp_to_pdev_dp(partner_dp, pdev_idx);
20079e0ee04fSRipan Deuri if (!dp_pdev) {
20089e0ee04fSRipan Deuri dev_kfree_skb_any(msdu);
20099e0ee04fSRipan Deuri continue;
20109e0ee04fSRipan Deuri }
20119e0ee04fSRipan Deuri ar = ath12k_pdev_dp_to_ar(dp_pdev);
20129e0ee04fSRipan Deuri
20139e0ee04fSRipan Deuri if (!ar || !rcu_dereference(ar->ab->pdevs_active[pdev_idx])) {
20141a6a4b6cSPavankumar Nandeshwar dev_kfree_skb_any(msdu);
20151a6a4b6cSPavankumar Nandeshwar continue;
20161a6a4b6cSPavankumar Nandeshwar }
20171a6a4b6cSPavankumar Nandeshwar
20181a6a4b6cSPavankumar Nandeshwar if (test_bit(ATH12K_FLAG_CAC_RUNNING, &ar->dev_flags)) {
20191a6a4b6cSPavankumar Nandeshwar dev_kfree_skb_any(msdu);
20201a6a4b6cSPavankumar Nandeshwar continue;
20211a6a4b6cSPavankumar Nandeshwar }
20221a6a4b6cSPavankumar Nandeshwar
20231a6a4b6cSPavankumar Nandeshwar if (rxcb->err_rel_src < HAL_WBM_REL_SRC_MODULE_MAX) {
202496b42732SPavankumar Nandeshwar device_id = dp_pdev->dp->device_id;
20251a6a4b6cSPavankumar Nandeshwar device_stats->rx_wbm_rel_source[rxcb->err_rel_src][device_id]++;
20261a6a4b6cSPavankumar Nandeshwar }
20271a6a4b6cSPavankumar Nandeshwar
20289e0ee04fSRipan Deuri ath12k_wifi7_dp_rx_wbm_err(dp_pdev, napi, msdu, &msdu_list);
20291a6a4b6cSPavankumar Nandeshwar }
20301a6a4b6cSPavankumar Nandeshwar rcu_read_unlock();
20311a6a4b6cSPavankumar Nandeshwar done:
20321a6a4b6cSPavankumar Nandeshwar return total_num_buffs_reaped;
20331a6a4b6cSPavankumar Nandeshwar }
20348dc72a6fSPavankumar Nandeshwar
ath12k_dp_rxdma_ring_sel_config_qcn9274(struct ath12k_base * ab)20358dc72a6fSPavankumar Nandeshwar int ath12k_dp_rxdma_ring_sel_config_qcn9274(struct ath12k_base *ab)
20368dc72a6fSPavankumar Nandeshwar {
20373a52762bSRipan Deuri struct ath12k_dp *dp = ath12k_ab_to_dp(ab);
20388dc72a6fSPavankumar Nandeshwar struct htt_rx_ring_tlv_filter tlv_filter = {};
20398dc72a6fSPavankumar Nandeshwar u32 ring_id;
20408dc72a6fSPavankumar Nandeshwar int ret;
20418dc72a6fSPavankumar Nandeshwar u32 hal_rx_desc_sz = ab->hal.hal_desc_sz;
20428dc72a6fSPavankumar Nandeshwar
20438dc72a6fSPavankumar Nandeshwar ring_id = dp->rx_refill_buf_ring.refill_buf_ring.ring_id;
20448dc72a6fSPavankumar Nandeshwar
20458dc72a6fSPavankumar Nandeshwar tlv_filter.rx_filter = HTT_RX_TLV_FLAGS_RXDMA_RING;
20468dc72a6fSPavankumar Nandeshwar tlv_filter.pkt_filter_flags2 = HTT_RX_FP_CTRL_PKT_FILTER_TLV_FLAGS2_BAR;
20478dc72a6fSPavankumar Nandeshwar tlv_filter.pkt_filter_flags3 = HTT_RX_FP_DATA_PKT_FILTER_TLV_FLASG3_MCAST |
20488dc72a6fSPavankumar Nandeshwar HTT_RX_FP_DATA_PKT_FILTER_TLV_FLASG3_UCAST |
20498dc72a6fSPavankumar Nandeshwar HTT_RX_FP_DATA_PKT_FILTER_TLV_FLASG3_NULL_DATA;
20508dc72a6fSPavankumar Nandeshwar tlv_filter.offset_valid = true;
20518dc72a6fSPavankumar Nandeshwar tlv_filter.rx_packet_offset = hal_rx_desc_sz;
20528dc72a6fSPavankumar Nandeshwar
20538dc72a6fSPavankumar Nandeshwar tlv_filter.rx_mpdu_start_offset =
205452537339SPavankumar Nandeshwar ath12k_hal_rx_desc_get_mpdu_start_offset_qcn9274();
20558dc72a6fSPavankumar Nandeshwar tlv_filter.rx_msdu_end_offset =
205652537339SPavankumar Nandeshwar ath12k_hal_rx_desc_get_msdu_end_offset_qcn9274();
20578dc72a6fSPavankumar Nandeshwar
205852537339SPavankumar Nandeshwar tlv_filter.rx_mpdu_start_wmask = ath12k_hal_rx_mpdu_start_wmask_get_qcn9274();
205952537339SPavankumar Nandeshwar tlv_filter.rx_msdu_end_wmask = ath12k_hal_rx_msdu_end_wmask_get_qcn9274();
20608dc72a6fSPavankumar Nandeshwar ath12k_dbg(ab, ATH12K_DBG_DATA,
20618dc72a6fSPavankumar Nandeshwar "Configuring compact tlv masks rx_mpdu_start_wmask 0x%x rx_msdu_end_wmask 0x%x\n",
20628dc72a6fSPavankumar Nandeshwar tlv_filter.rx_mpdu_start_wmask, tlv_filter.rx_msdu_end_wmask);
20638dc72a6fSPavankumar Nandeshwar
20648dc72a6fSPavankumar Nandeshwar ret = ath12k_dp_tx_htt_rx_filter_setup(ab, ring_id, 0,
20658dc72a6fSPavankumar Nandeshwar HAL_RXDMA_BUF,
20668dc72a6fSPavankumar Nandeshwar DP_RXDMA_REFILL_RING_SIZE,
20678dc72a6fSPavankumar Nandeshwar &tlv_filter);
20688dc72a6fSPavankumar Nandeshwar
20698dc72a6fSPavankumar Nandeshwar return ret;
20708dc72a6fSPavankumar Nandeshwar }
20718dc72a6fSPavankumar Nandeshwar
ath12k_dp_rxdma_ring_sel_config_wcn7850(struct ath12k_base * ab)20728dc72a6fSPavankumar Nandeshwar int ath12k_dp_rxdma_ring_sel_config_wcn7850(struct ath12k_base *ab)
20738dc72a6fSPavankumar Nandeshwar {
20743a52762bSRipan Deuri struct ath12k_dp *dp = ath12k_ab_to_dp(ab);
20758dc72a6fSPavankumar Nandeshwar struct htt_rx_ring_tlv_filter tlv_filter = {};
20768dc72a6fSPavankumar Nandeshwar u32 ring_id;
20778dc72a6fSPavankumar Nandeshwar int ret = 0;
20788dc72a6fSPavankumar Nandeshwar u32 hal_rx_desc_sz = ab->hal.hal_desc_sz;
20798dc72a6fSPavankumar Nandeshwar int i;
20808dc72a6fSPavankumar Nandeshwar
20818dc72a6fSPavankumar Nandeshwar ring_id = dp->rx_refill_buf_ring.refill_buf_ring.ring_id;
20828dc72a6fSPavankumar Nandeshwar
20838dc72a6fSPavankumar Nandeshwar tlv_filter.rx_filter = HTT_RX_TLV_FLAGS_RXDMA_RING;
20848dc72a6fSPavankumar Nandeshwar tlv_filter.pkt_filter_flags2 = HTT_RX_FP_CTRL_PKT_FILTER_TLV_FLAGS2_BAR;
20858dc72a6fSPavankumar Nandeshwar tlv_filter.pkt_filter_flags3 = HTT_RX_FP_DATA_PKT_FILTER_TLV_FLASG3_MCAST |
20868dc72a6fSPavankumar Nandeshwar HTT_RX_FP_DATA_PKT_FILTER_TLV_FLASG3_UCAST |
20878dc72a6fSPavankumar Nandeshwar HTT_RX_FP_DATA_PKT_FILTER_TLV_FLASG3_NULL_DATA;
20888dc72a6fSPavankumar Nandeshwar tlv_filter.offset_valid = true;
20898dc72a6fSPavankumar Nandeshwar tlv_filter.rx_packet_offset = hal_rx_desc_sz;
20908dc72a6fSPavankumar Nandeshwar
20918dc72a6fSPavankumar Nandeshwar tlv_filter.rx_header_offset = offsetof(struct hal_rx_desc_wcn7850, pkt_hdr_tlv);
20928dc72a6fSPavankumar Nandeshwar
20938dc72a6fSPavankumar Nandeshwar tlv_filter.rx_mpdu_start_offset =
209452537339SPavankumar Nandeshwar ath12k_hal_rx_desc_get_mpdu_start_offset_wcn7850();
20958dc72a6fSPavankumar Nandeshwar tlv_filter.rx_msdu_end_offset =
209652537339SPavankumar Nandeshwar ath12k_hal_rx_desc_get_msdu_end_offset_wcn7850();
20978dc72a6fSPavankumar Nandeshwar
20988dc72a6fSPavankumar Nandeshwar /* TODO: Selectively subscribe to required qwords within msdu_end
20998dc72a6fSPavankumar Nandeshwar * and mpdu_start and setup the mask in below msg
21008dc72a6fSPavankumar Nandeshwar * and modify the rx_desc struct
21018dc72a6fSPavankumar Nandeshwar */
21028dc72a6fSPavankumar Nandeshwar
21038dc72a6fSPavankumar Nandeshwar for (i = 0; i < ab->hw_params->num_rxdma_per_pdev; i++) {
21048dc72a6fSPavankumar Nandeshwar ring_id = dp->rx_mac_buf_ring[i].ring_id;
21058dc72a6fSPavankumar Nandeshwar ret = ath12k_dp_tx_htt_rx_filter_setup(ab, ring_id, i,
21068dc72a6fSPavankumar Nandeshwar HAL_RXDMA_BUF,
21078dc72a6fSPavankumar Nandeshwar DP_RXDMA_REFILL_RING_SIZE,
21088dc72a6fSPavankumar Nandeshwar &tlv_filter);
21098dc72a6fSPavankumar Nandeshwar }
21108dc72a6fSPavankumar Nandeshwar
21118dc72a6fSPavankumar Nandeshwar return ret;
21128dc72a6fSPavankumar Nandeshwar }
21135d2df2aaSPavankumar Nandeshwar
ath12k_dp_rxdma_ring_sel_config_qcc2072(struct ath12k_base * ab)2114023ace9fSBaochen Qiang int ath12k_dp_rxdma_ring_sel_config_qcc2072(struct ath12k_base *ab)
2115023ace9fSBaochen Qiang {
2116023ace9fSBaochen Qiang struct ath12k_dp *dp = ath12k_ab_to_dp(ab);
2117023ace9fSBaochen Qiang struct htt_rx_ring_tlv_filter tlv_filter = {};
2118023ace9fSBaochen Qiang u32 ring_id;
2119023ace9fSBaochen Qiang int ret = 0;
2120023ace9fSBaochen Qiang u32 hal_rx_desc_sz = ab->hal.hal_desc_sz;
2121023ace9fSBaochen Qiang int i;
2122023ace9fSBaochen Qiang
2123023ace9fSBaochen Qiang ring_id = dp->rx_refill_buf_ring.refill_buf_ring.ring_id;
2124023ace9fSBaochen Qiang
2125023ace9fSBaochen Qiang tlv_filter.rx_filter = HTT_RX_TLV_FLAGS_RXDMA_RING;
2126023ace9fSBaochen Qiang tlv_filter.pkt_filter_flags2 = HTT_RX_FP_CTRL_PKT_FILTER_TLV_FLAGS2_BAR;
2127023ace9fSBaochen Qiang tlv_filter.pkt_filter_flags3 = HTT_RX_FP_DATA_PKT_FILTER_TLV_FLASG3_MCAST |
2128023ace9fSBaochen Qiang HTT_RX_FP_DATA_PKT_FILTER_TLV_FLASG3_UCAST |
2129023ace9fSBaochen Qiang HTT_RX_FP_DATA_PKT_FILTER_TLV_FLASG3_NULL_DATA;
2130023ace9fSBaochen Qiang tlv_filter.offset_valid = true;
2131023ace9fSBaochen Qiang tlv_filter.rx_packet_offset = hal_rx_desc_sz;
2132023ace9fSBaochen Qiang
2133023ace9fSBaochen Qiang tlv_filter.rx_header_offset = offsetof(struct hal_rx_desc_qcc2072, pkt_hdr_tlv);
2134023ace9fSBaochen Qiang
2135023ace9fSBaochen Qiang tlv_filter.rx_mpdu_start_offset =
2136023ace9fSBaochen Qiang ath12k_hal_rx_desc_get_mpdu_start_offset_qcc2072();
2137023ace9fSBaochen Qiang tlv_filter.rx_msdu_end_offset =
2138023ace9fSBaochen Qiang ath12k_hal_rx_desc_get_msdu_end_offset_qcc2072();
2139023ace9fSBaochen Qiang
2140023ace9fSBaochen Qiang /*
2141023ace9fSBaochen Qiang * TODO: Selectively subscribe to required qwords within msdu_end
2142023ace9fSBaochen Qiang * and mpdu_start and setup the mask in below msg
2143023ace9fSBaochen Qiang * and modify the rx_desc struct
2144023ace9fSBaochen Qiang */
2145023ace9fSBaochen Qiang
2146023ace9fSBaochen Qiang for (i = 0; i < ab->hw_params->num_rxdma_per_pdev; i++) {
2147023ace9fSBaochen Qiang ring_id = dp->rx_mac_buf_ring[i].ring_id;
2148023ace9fSBaochen Qiang ret = ath12k_dp_tx_htt_rx_filter_setup(ab, ring_id, i,
2149023ace9fSBaochen Qiang HAL_RXDMA_BUF,
2150023ace9fSBaochen Qiang DP_RXDMA_REFILL_RING_SIZE,
2151023ace9fSBaochen Qiang &tlv_filter);
2152023ace9fSBaochen Qiang }
2153023ace9fSBaochen Qiang
2154023ace9fSBaochen Qiang return ret;
2155023ace9fSBaochen Qiang }
2156023ace9fSBaochen Qiang
/* ath12k_wifi7_dp_rx_process_reo_status() - drain the REO status ring and
 * complete pending REO commands.
 *
 * Walks every entry currently available on the REO status destination ring,
 * decodes each status TLV into @reo_status via the HAL, and then matches the
 * status's cmd_num against the driver's list of outstanding REO commands
 * (dp->reo_cmd_list). A matched command is unlinked and its completion
 * handler is invoked with the command status, after which the command is
 * freed.
 */
void ath12k_wifi7_dp_rx_process_reo_status(struct ath12k_dp *dp)
{
	struct ath12k_base *ab = dp->ab;
	struct ath12k_hal *hal = dp->hal;
	struct hal_srng *srng;
	struct ath12k_dp_rx_reo_cmd *cmd, *tmp;
	bool found = false;
	u16 tag;
	struct hal_reo_status reo_status;
	void *hdr, *desc;

	srng = &hal->srng_list[dp->reo_status_ring.ring_id];

	memset(&reo_status, 0, sizeof(reo_status));

	/* srng->lock is held across the whole drain loop; reo_cmd_lock is
	 * nested inside it below, so that ordering must be preserved.
	 */
	spin_lock_bh(&srng->lock);

	ath12k_hal_srng_access_begin(ab, srng);

	while ((hdr = ath12k_hal_srng_dst_get_next_entry(ab, srng))) {
		/* Decode the TLV header; desc is set to point at the
		 * status payload for the tag-specific parser below.
		 */
		tag = hal->ops->reo_status_dec_tlv_hdr(hdr, &desc);

		switch (tag) {
		case HAL_REO_GET_QUEUE_STATS_STATUS:
			ath12k_wifi7_hal_reo_status_queue_stats(ab, desc,
								&reo_status);
			break;
		case HAL_REO_FLUSH_QUEUE_STATUS:
			ath12k_wifi7_hal_reo_flush_queue_status(ab, desc,
								&reo_status);
			break;
		case HAL_REO_FLUSH_CACHE_STATUS:
			ath12k_wifi7_hal_reo_flush_cache_status(ab, desc,
								&reo_status);
			break;
		case HAL_REO_UNBLOCK_CACHE_STATUS:
			ath12k_wifi7_hal_reo_unblk_cache_status(ab, desc,
								&reo_status);
			break;
		case HAL_REO_FLUSH_TIMEOUT_LIST_STATUS:
			ath12k_wifi7_hal_reo_flush_timeout_list_status(ab, desc,
								       &reo_status);
			break;
		case HAL_REO_DESCRIPTOR_THRESHOLD_REACHED_STATUS:
			ath12k_wifi7_hal_reo_desc_thresh_reached_status(ab, desc,
									&reo_status);
			break;
		case HAL_REO_UPDATE_RX_REO_QUEUE_STATUS:
			ath12k_wifi7_hal_reo_update_rx_reo_queue_status(ab, desc,
									&reo_status);
			break;
		default:
			/* Unknown status TLV: warn and move on to the next
			 * ring entry without touching the command list.
			 */
			ath12k_warn(ab, "Unknown reo status type %d\n", tag);
			continue;
		}

		/* Find and unlink the pending command this status answers.
		 * The unlink happens under reo_cmd_lock, but the handler is
		 * deliberately called after the lock is dropped.
		 */
		spin_lock_bh(&dp->reo_cmd_lock);
		list_for_each_entry_safe(cmd, tmp, &dp->reo_cmd_list, list) {
			if (reo_status.uniform_hdr.cmd_num == cmd->cmd_num) {
				found = true;
				list_del(&cmd->list);
				break;
			}
		}
		spin_unlock_bh(&dp->reo_cmd_lock);

		if (found) {
			cmd->handler(dp, (void *)&cmd->data,
				     reo_status.uniform_hdr.cmd_status);
			kfree(cmd);
		}

		/* Reset for the next status entry */
		found = false;
	}

	ath12k_hal_srng_access_end(ab, srng);

	spin_unlock_bh(&srng->lock);
}
2236906b5ac9SAlok Singh
2237906b5ac9SAlok Singh bool
ath12k_wifi7_dp_rxdesc_mpdu_valid(struct ath12k_base * ab,struct hal_rx_desc * rx_desc)2238906b5ac9SAlok Singh ath12k_wifi7_dp_rxdesc_mpdu_valid(struct ath12k_base *ab,
2239906b5ac9SAlok Singh struct hal_rx_desc *rx_desc)
2240906b5ac9SAlok Singh {
2241906b5ac9SAlok Singh u32 tlv_tag;
2242906b5ac9SAlok Singh
2243906b5ac9SAlok Singh tlv_tag = ab->hal.ops->rx_desc_get_mpdu_start_tag(rx_desc);
2244906b5ac9SAlok Singh
2245906b5ac9SAlok Singh return tlv_tag == HAL_RX_MPDU_START;
2246906b5ac9SAlok Singh }
2247