// SPDX-License-Identifier: ISC
/*
 * Copyright (c) 2005-2011 Atheros Communications Inc.
 * Copyright (c) 2011-2017 Qualcomm Atheros, Inc.
 * Copyright (c) 2018 The Linux Foundation. All rights reserved.
 */

#include "hif.h"
#include "ce.h"
#include "debug.h"

/*
 * Support for Copy Engine hardware, which is mainly used for
 * communication between Host and Target over a PCIe interconnect.
 */

/*
 * A single CopyEngine (CE) comprises two "rings":
 * a source ring
 * a destination ring
 *
 * Each ring consists of a number of descriptors which specify
 * an address, length, and meta-data.
 *
 * Typically, one side of the PCIe/AHB/SNOC interconnect (Host or Target)
 * controls one ring and the other side controls the other ring.
 * The source side chooses when to initiate a transfer and it
 * chooses what to send (buffer address, length). The destination
 * side keeps a supply of "anonymous receive buffers" available and
 * it handles incoming data as it arrives (when the destination
 * receives an interrupt).
 *
 * The sender may send a simple buffer (address/length) or it may
 * send a small list of buffers. When a small list is sent, hardware
 * "gathers" these and they end up in a single destination buffer
 * with a single interrupt.
 *
 * There are several "contexts" managed by this layer -- more, it may
 * seem, than should be needed. These are provided mainly for maximum
 * flexibility and especially to facilitate a simpler HIF
 * implementation. There are per-CopyEngine recv, send, and watermark
 * contexts. These are supplied by the caller when a recv, send,
 * or watermark handler is established and they are echoed back to
 * the caller when the respective callbacks are invoked. There is
 * also a per-transfer context supplied by the caller when a buffer
 * (or sendlist) is sent and when a buffer is enqueued for recv.
 * These per-transfer contexts are echoed back to the caller when
 * the buffer is sent/received.
 */
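
/*
 * Illustrative caller-side sketch (not part of the driver): how the
 * per-transfer context described above is typically used. The
 * ce_example_send() name is hypothetical; ath10k_ce_send() is the real
 * entry point defined later in this file. The context pointer passed
 * here is echoed back to the caller by the send-completion path.
 */
static inline int __maybe_unused
ce_example_send(struct ath10k_ce_pipe *pipe, void *per_transfer_ctx,
		dma_addr_t paddr, unsigned int nbytes,
		unsigned int transfer_id)
{
	/* per_transfer_ctx (e.g. an skb) is returned on completion */
	return ath10k_ce_send(pipe, per_transfer_ctx, paddr, nbytes,
			      transfer_id, 0);
}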

static inline u32 shadow_sr_wr_ind_addr(struct ath10k *ar,
					struct ath10k_ce_pipe *ce_state)
{
	u32 ce_id = ce_state->id;
	u32 addr = 0;

	switch (ce_id) {
	case 0:
		addr = 0x00032000;
		break;
	case 3:
		addr = 0x0003200C;
		break;
	case 4:
		addr = 0x00032010;
		break;
	case 5:
		addr = 0x00032014;
		break;
	case 7:
		addr = 0x0003201C;
		break;
	default:
		ath10k_warn(ar, "invalid CE id: %d", ce_id);
		break;
	}
	return addr;
}

static inline unsigned int
ath10k_set_ring_byte(unsigned int offset,
		     struct ath10k_hw_ce_regs_addr_map *addr_map)
{
	return ((offset << addr_map->lsb) & addr_map->mask);
}
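
/*
 * Example of the packing above (hypothetical addr_map values): with
 * addr_map->lsb == 16 and addr_map->mask == 0xffff0000,
 * ath10k_set_ring_byte(5, addr_map) yields 0x00050000, i.e. the value 5
 * placed into bits 31:16 of the register word.
 */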

static inline u32 ath10k_ce_read32(struct ath10k *ar, u32 offset)
{
	struct ath10k_ce *ce = ath10k_ce_priv(ar);

	return ce->bus_ops->read32(ar, offset);
}

static inline void ath10k_ce_write32(struct ath10k *ar, u32 offset, u32 value)
{
	struct ath10k_ce *ce = ath10k_ce_priv(ar);

	ce->bus_ops->write32(ar, offset, value);
}

static inline void ath10k_ce_dest_ring_write_index_set(struct ath10k *ar,
						       u32 ce_ctrl_addr,
						       unsigned int n)
{
	ath10k_ce_write32(ar, ce_ctrl_addr +
			  ar->hw_ce_regs->dst_wr_index_addr, n);
}

static inline u32 ath10k_ce_dest_ring_write_index_get(struct ath10k *ar,
						      u32 ce_ctrl_addr)
{
	return ath10k_ce_read32(ar, ce_ctrl_addr +
				ar->hw_ce_regs->dst_wr_index_addr);
}

static inline void ath10k_ce_src_ring_write_index_set(struct ath10k *ar,
						      u32 ce_ctrl_addr,
						      unsigned int n)
{
	ath10k_ce_write32(ar, ce_ctrl_addr +
			  ar->hw_ce_regs->sr_wr_index_addr, n);
}

static inline u32 ath10k_ce_src_ring_write_index_get(struct ath10k *ar,
						     u32 ce_ctrl_addr)
{
	return ath10k_ce_read32(ar, ce_ctrl_addr +
				ar->hw_ce_regs->sr_wr_index_addr);
}

static inline u32 ath10k_ce_src_ring_read_index_from_ddr(struct ath10k *ar,
							 u32 ce_id)
{
	struct ath10k_ce *ce = ath10k_ce_priv(ar);

	return ce->vaddr_rri[ce_id] & CE_DDR_RRI_MASK;
}

static inline u32 ath10k_ce_src_ring_read_index_get(struct ath10k *ar,
						    u32 ce_ctrl_addr)
{
	struct ath10k_ce *ce = ath10k_ce_priv(ar);
	u32 ce_id = COPY_ENGINE_ID(ce_ctrl_addr);
	struct ath10k_ce_pipe *ce_state = &ce->ce_states[ce_id];
	u32 index;

	if (ar->hw_params.rri_on_ddr &&
	    (ce_state->attr_flags & CE_ATTR_DIS_INTR))
		index = ath10k_ce_src_ring_read_index_from_ddr(ar, ce_id);
	else
		index = ath10k_ce_read32(ar, ce_ctrl_addr +
					 ar->hw_ce_regs->current_srri_addr);

	return index;
}

static inline void
ath10k_ce_shadow_src_ring_write_index_set(struct ath10k *ar,
					  struct ath10k_ce_pipe *ce_state,
					  unsigned int value)
{
	ath10k_ce_write32(ar, shadow_sr_wr_ind_addr(ar, ce_state), value);
}

static inline void ath10k_ce_src_ring_base_addr_set(struct ath10k *ar,
						    u32 ce_id,
						    u64 addr)
{
	struct ath10k_ce *ce = ath10k_ce_priv(ar);
	struct ath10k_ce_pipe *ce_state = &ce->ce_states[ce_id];
	u32 ce_ctrl_addr = ath10k_ce_base_address(ar, ce_id);
	u32 addr_lo = lower_32_bits(addr);

	ath10k_ce_write32(ar, ce_ctrl_addr +
			  ar->hw_ce_regs->sr_base_addr_lo, addr_lo);

	if (ce_state->ops->ce_set_src_ring_base_addr_hi) {
		ce_state->ops->ce_set_src_ring_base_addr_hi(ar, ce_ctrl_addr,
							    addr);
	}
}

static void ath10k_ce_set_src_ring_base_addr_hi(struct ath10k *ar,
						u32 ce_ctrl_addr,
						u64 addr)
{
	u32 addr_hi = upper_32_bits(addr) & CE_DESC_ADDR_HI_MASK;

	ath10k_ce_write32(ar, ce_ctrl_addr +
			  ar->hw_ce_regs->sr_base_addr_hi, addr_hi);
}

static inline void ath10k_ce_src_ring_size_set(struct ath10k *ar,
					       u32 ce_ctrl_addr,
					       unsigned int n)
{
	ath10k_ce_write32(ar, ce_ctrl_addr +
			  ar->hw_ce_regs->sr_size_addr, n);
}

static inline void ath10k_ce_src_ring_dmax_set(struct ath10k *ar,
					       u32 ce_ctrl_addr,
					       unsigned int n)
{
	struct ath10k_hw_ce_ctrl1 *ctrl_regs = ar->hw_ce_regs->ctrl1_regs;

	u32 ctrl1_addr = ath10k_ce_read32(ar, ce_ctrl_addr +
					  ctrl_regs->addr);

	ath10k_ce_write32(ar, ce_ctrl_addr + ctrl_regs->addr,
			  (ctrl1_addr & ~(ctrl_regs->dmax->mask)) |
			  ath10k_set_ring_byte(n, ctrl_regs->dmax));
}

static inline void ath10k_ce_src_ring_byte_swap_set(struct ath10k *ar,
						    u32 ce_ctrl_addr,
						    unsigned int n)
{
	struct ath10k_hw_ce_ctrl1 *ctrl_regs = ar->hw_ce_regs->ctrl1_regs;

	u32 ctrl1_addr = ath10k_ce_read32(ar, ce_ctrl_addr +
					  ctrl_regs->addr);

	ath10k_ce_write32(ar, ce_ctrl_addr + ctrl_regs->addr,
			  (ctrl1_addr & ~(ctrl_regs->src_ring->mask)) |
			  ath10k_set_ring_byte(n, ctrl_regs->src_ring));
}

static inline void ath10k_ce_dest_ring_byte_swap_set(struct ath10k *ar,
						     u32 ce_ctrl_addr,
						     unsigned int n)
{
	struct ath10k_hw_ce_ctrl1 *ctrl_regs = ar->hw_ce_regs->ctrl1_regs;

	u32 ctrl1_addr = ath10k_ce_read32(ar, ce_ctrl_addr +
					  ctrl_regs->addr);

	ath10k_ce_write32(ar, ce_ctrl_addr + ctrl_regs->addr,
			  (ctrl1_addr & ~(ctrl_regs->dst_ring->mask)) |
			  ath10k_set_ring_byte(n, ctrl_regs->dst_ring));
}

static inline
u32 ath10k_ce_dest_ring_read_index_from_ddr(struct ath10k *ar, u32 ce_id)
{
	struct ath10k_ce *ce = ath10k_ce_priv(ar);

	return (ce->vaddr_rri[ce_id] >> CE_DDR_DRRI_SHIFT) &
		CE_DDR_RRI_MASK;
}
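
/*
 * Note on the DDR-based read index (RRI) layout assumed by the two
 * *_read_index_from_ddr() helpers above: each 32-bit vaddr_rri[] word
 * packs the source ring read index in its low bits (CE_DDR_RRI_MASK)
 * and the destination ring read index above it (CE_DDR_DRRI_SHIFT).
 * For example, if vaddr_rri[ce_id] == 0x00070003 and CE_DDR_DRRI_SHIFT
 * is 16, the SRRI is 3 and the DRRI is 7; the exact shift and mask
 * widths come from the hardware definitions in ce.h.
 */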

static inline u32 ath10k_ce_dest_ring_read_index_get(struct ath10k *ar,
						     u32 ce_ctrl_addr)
{
	struct ath10k_ce *ce = ath10k_ce_priv(ar);
	u32 ce_id = COPY_ENGINE_ID(ce_ctrl_addr);
	struct ath10k_ce_pipe *ce_state = &ce->ce_states[ce_id];
	u32 index;

	if (ar->hw_params.rri_on_ddr &&
	    (ce_state->attr_flags & CE_ATTR_DIS_INTR))
		index = ath10k_ce_dest_ring_read_index_from_ddr(ar, ce_id);
	else
		index = ath10k_ce_read32(ar, ce_ctrl_addr +
					 ar->hw_ce_regs->current_drri_addr);

	return index;
}

static inline void ath10k_ce_dest_ring_base_addr_set(struct ath10k *ar,
						     u32 ce_id,
						     u64 addr)
{
	struct ath10k_ce *ce = ath10k_ce_priv(ar);
	struct ath10k_ce_pipe *ce_state = &ce->ce_states[ce_id];
	u32 ce_ctrl_addr = ath10k_ce_base_address(ar, ce_id);
	u32 addr_lo = lower_32_bits(addr);

	ath10k_ce_write32(ar, ce_ctrl_addr +
			  ar->hw_ce_regs->dr_base_addr_lo, addr_lo);

	if (ce_state->ops->ce_set_dest_ring_base_addr_hi) {
		ce_state->ops->ce_set_dest_ring_base_addr_hi(ar, ce_ctrl_addr,
							     addr);
	}
}

static void ath10k_ce_set_dest_ring_base_addr_hi(struct ath10k *ar,
						 u32 ce_ctrl_addr,
						 u64 addr)
{
	u32 addr_hi = upper_32_bits(addr) & CE_DESC_ADDR_HI_MASK;
	u32 reg_value;

	reg_value = ath10k_ce_read32(ar, ce_ctrl_addr +
				     ar->hw_ce_regs->dr_base_addr_hi);
	reg_value &= ~CE_DESC_ADDR_HI_MASK;
	reg_value |= addr_hi;
	ath10k_ce_write32(ar, ce_ctrl_addr +
			  ar->hw_ce_regs->dr_base_addr_hi, reg_value);
}

static inline void ath10k_ce_dest_ring_size_set(struct ath10k *ar,
						u32 ce_ctrl_addr,
						unsigned int n)
{
	ath10k_ce_write32(ar, ce_ctrl_addr +
			  ar->hw_ce_regs->dr_size_addr, n);
}

static inline void ath10k_ce_src_ring_highmark_set(struct ath10k *ar,
						   u32 ce_ctrl_addr,
						   unsigned int n)
{
	struct ath10k_hw_ce_dst_src_wm_regs *srcr_wm = ar->hw_ce_regs->wm_srcr;
	u32 addr = ath10k_ce_read32(ar, ce_ctrl_addr + srcr_wm->addr);

	ath10k_ce_write32(ar, ce_ctrl_addr + srcr_wm->addr,
			  (addr & ~(srcr_wm->wm_high->mask)) |
			  (ath10k_set_ring_byte(n, srcr_wm->wm_high)));
}

static inline void ath10k_ce_src_ring_lowmark_set(struct ath10k *ar,
						  u32 ce_ctrl_addr,
						  unsigned int n)
{
	struct ath10k_hw_ce_dst_src_wm_regs *srcr_wm = ar->hw_ce_regs->wm_srcr;
	u32 addr = ath10k_ce_read32(ar, ce_ctrl_addr + srcr_wm->addr);

	ath10k_ce_write32(ar, ce_ctrl_addr + srcr_wm->addr,
			  (addr & ~(srcr_wm->wm_low->mask)) |
			  (ath10k_set_ring_byte(n, srcr_wm->wm_low)));
}

static inline void ath10k_ce_dest_ring_highmark_set(struct ath10k *ar,
						    u32 ce_ctrl_addr,
						    unsigned int n)
{
	struct ath10k_hw_ce_dst_src_wm_regs *dstr_wm = ar->hw_ce_regs->wm_dstr;
	u32 addr = ath10k_ce_read32(ar, ce_ctrl_addr + dstr_wm->addr);

	ath10k_ce_write32(ar, ce_ctrl_addr + dstr_wm->addr,
			  (addr & ~(dstr_wm->wm_high->mask)) |
			  (ath10k_set_ring_byte(n, dstr_wm->wm_high)));
}

static inline void ath10k_ce_dest_ring_lowmark_set(struct ath10k *ar,
						   u32 ce_ctrl_addr,
						   unsigned int n)
{
	struct ath10k_hw_ce_dst_src_wm_regs *dstr_wm = ar->hw_ce_regs->wm_dstr;
	u32 addr = ath10k_ce_read32(ar, ce_ctrl_addr + dstr_wm->addr);

	ath10k_ce_write32(ar, ce_ctrl_addr + dstr_wm->addr,
			  (addr & ~(dstr_wm->wm_low->mask)) |
			  (ath10k_set_ring_byte(n, dstr_wm->wm_low)));
}

static inline void ath10k_ce_copy_complete_inter_enable(struct ath10k *ar,
							u32 ce_ctrl_addr)
{
	struct ath10k_hw_ce_host_ie *host_ie = ar->hw_ce_regs->host_ie;

	u32 host_ie_addr = ath10k_ce_read32(ar, ce_ctrl_addr +
					    ar->hw_ce_regs->host_ie_addr);

	ath10k_ce_write32(ar, ce_ctrl_addr + ar->hw_ce_regs->host_ie_addr,
			  host_ie_addr | host_ie->copy_complete->mask);
}

static inline void ath10k_ce_copy_complete_intr_disable(struct ath10k *ar,
							u32 ce_ctrl_addr)
{
	struct ath10k_hw_ce_host_ie *host_ie = ar->hw_ce_regs->host_ie;

	u32 host_ie_addr = ath10k_ce_read32(ar, ce_ctrl_addr +
					    ar->hw_ce_regs->host_ie_addr);

	ath10k_ce_write32(ar, ce_ctrl_addr + ar->hw_ce_regs->host_ie_addr,
			  host_ie_addr & ~(host_ie->copy_complete->mask));
}

static inline void ath10k_ce_watermark_intr_disable(struct ath10k *ar,
						    u32 ce_ctrl_addr)
{
	struct ath10k_hw_ce_host_wm_regs *wm_regs = ar->hw_ce_regs->wm_regs;

	u32 host_ie_addr = ath10k_ce_read32(ar, ce_ctrl_addr +
					    ar->hw_ce_regs->host_ie_addr);

	ath10k_ce_write32(ar, ce_ctrl_addr + ar->hw_ce_regs->host_ie_addr,
			  host_ie_addr & ~(wm_regs->wm_mask));
}

static inline void ath10k_ce_error_intr_disable(struct ath10k *ar,
						u32 ce_ctrl_addr)
{
	struct ath10k_hw_ce_misc_regs *misc_regs = ar->hw_ce_regs->misc_regs;

	u32 misc_ie_addr = ath10k_ce_read32(ar,
			ce_ctrl_addr + ar->hw_ce_regs->misc_ie_addr);

	ath10k_ce_write32(ar,
			  ce_ctrl_addr + ar->hw_ce_regs->misc_ie_addr,
			  misc_ie_addr & ~(misc_regs->err_mask));
}

static inline void ath10k_ce_engine_int_status_clear(struct ath10k *ar,
						     u32 ce_ctrl_addr,
						     unsigned int mask)
{
	struct ath10k_hw_ce_host_wm_regs *wm_regs = ar->hw_ce_regs->wm_regs;

	ath10k_ce_write32(ar, ce_ctrl_addr + wm_regs->addr, mask);
}

/*
 * Guts of ath10k_ce_send.
 * The caller takes responsibility for any needed locking.
 */
static int _ath10k_ce_send_nolock(struct ath10k_ce_pipe *ce_state,
				  void *per_transfer_context,
				  dma_addr_t buffer,
				  unsigned int nbytes,
				  unsigned int transfer_id,
				  unsigned int flags)
{
	struct ath10k *ar = ce_state->ar;
	struct ath10k_ce_ring *src_ring = ce_state->src_ring;
	struct ce_desc *desc, sdesc;
	unsigned int nentries_mask = src_ring->nentries_mask;
	unsigned int sw_index = src_ring->sw_index;
	unsigned int write_index = src_ring->write_index;
	u32 ctrl_addr = ce_state->ctrl_addr;
	u32 desc_flags = 0;
	int ret = 0;

	if (nbytes > ce_state->src_sz_max)
		ath10k_warn(ar, "%s: sending more than we can (nbytes: %d, max: %d)\n",
			    __func__, nbytes, ce_state->src_sz_max);

	if (unlikely(CE_RING_DELTA(nentries_mask,
				   write_index, sw_index - 1) <= 0)) {
		ret = -ENOSR;
		goto exit;
	}

	desc = CE_SRC_RING_TO_DESC(src_ring->base_addr_owner_space,
				   write_index);

	desc_flags |= SM(transfer_id, CE_DESC_FLAGS_META_DATA);

	if (flags & CE_SEND_FLAG_GATHER)
		desc_flags |= CE_DESC_FLAGS_GATHER;
	if (flags & CE_SEND_FLAG_BYTE_SWAP)
		desc_flags |= CE_DESC_FLAGS_BYTE_SWAP;

	sdesc.addr = __cpu_to_le32(buffer);
	sdesc.nbytes = __cpu_to_le16(nbytes);
	sdesc.flags = __cpu_to_le16(desc_flags);

	*desc = sdesc;

	src_ring->per_transfer_context[write_index] = per_transfer_context;

	/* Update Source Ring Write Index */
	write_index = CE_RING_IDX_INCR(nentries_mask, write_index);

	/* WORKAROUND */
	if (!(flags & CE_SEND_FLAG_GATHER))
		ath10k_ce_src_ring_write_index_set(ar, ctrl_addr, write_index);

	src_ring->write_index = write_index;
exit:
	return ret;
}
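
/*
 * A worked example of the ring-space check above, assuming the upstream
 * definition CE_RING_DELTA(mask, from, to) == ((to - from) & mask): with
 * an 8-entry ring (mask 0x7), write_index == 5 and sw_index == 6, the
 * delta to (sw_index - 1) is (5 - 5) & 0x7 == 0, so the ring is full and
 * the send path returns -ENOSR.
 */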

static int _ath10k_ce_send_nolock_64(struct ath10k_ce_pipe *ce_state,
				     void *per_transfer_context,
				     dma_addr_t buffer,
				     unsigned int nbytes,
				     unsigned int transfer_id,
				     unsigned int flags)
{
	struct ath10k *ar = ce_state->ar;
	struct ath10k_ce_ring *src_ring = ce_state->src_ring;
	struct ce_desc_64 *desc, sdesc;
	unsigned int nentries_mask = src_ring->nentries_mask;
	unsigned int sw_index;
	unsigned int write_index = src_ring->write_index;
	u32 ctrl_addr = ce_state->ctrl_addr;
	__le32 *addr;
	u32 desc_flags = 0;
	int ret = 0;

	if (test_bit(ATH10K_FLAG_CRASH_FLUSH, &ar->dev_flags))
		return -ESHUTDOWN;

	if (nbytes > ce_state->src_sz_max)
		ath10k_warn(ar, "%s: sending more than we can (nbytes: %d, max: %d)\n",
			    __func__, nbytes, ce_state->src_sz_max);

	if (ar->hw_params.rri_on_ddr)
		sw_index = ath10k_ce_src_ring_read_index_from_ddr(ar, ce_state->id);
	else
		sw_index = src_ring->sw_index;

	if (unlikely(CE_RING_DELTA(nentries_mask,
				   write_index, sw_index - 1) <= 0)) {
		ret = -ENOSR;
		goto exit;
	}

	desc = CE_SRC_RING_TO_DESC_64(src_ring->base_addr_owner_space,
				      write_index);

	desc_flags |= SM(transfer_id, CE_DESC_FLAGS_META_DATA);

	if (flags & CE_SEND_FLAG_GATHER)
		desc_flags |= CE_DESC_FLAGS_GATHER;

	if (flags & CE_SEND_FLAG_BYTE_SWAP)
		desc_flags |= CE_DESC_FLAGS_BYTE_SWAP;

	addr = (__le32 *)&sdesc.addr;

	flags |= upper_32_bits(buffer) & CE_DESC_ADDR_HI_MASK;
	addr[0] = __cpu_to_le32(buffer);
	addr[1] = __cpu_to_le32(flags);
	if (flags & CE_SEND_FLAG_GATHER)
		addr[1] |= __cpu_to_le32(CE_WCN3990_DESC_FLAGS_GATHER);
	else
		addr[1] &= ~(__cpu_to_le32(CE_WCN3990_DESC_FLAGS_GATHER));

	sdesc.nbytes = __cpu_to_le16(nbytes);
	sdesc.flags = __cpu_to_le16(desc_flags);

	*desc = sdesc;

	src_ring->per_transfer_context[write_index] = per_transfer_context;

	/* Update Source Ring Write Index */
	write_index = CE_RING_IDX_INCR(nentries_mask, write_index);

	if (!(flags & CE_SEND_FLAG_GATHER)) {
		if (ar->hw_params.shadow_reg_support)
			ath10k_ce_shadow_src_ring_write_index_set(ar, ce_state,
								  write_index);
		else
			ath10k_ce_src_ring_write_index_set(ar, ctrl_addr,
							   write_index);
	}

	src_ring->write_index = write_index;
exit:
	return ret;
}

int ath10k_ce_send_nolock(struct ath10k_ce_pipe *ce_state,
			  void *per_transfer_context,
			  dma_addr_t buffer,
			  unsigned int nbytes,
			  unsigned int transfer_id,
			  unsigned int flags)
{
	return ce_state->ops->ce_send_nolock(ce_state, per_transfer_context,
					     buffer, nbytes, transfer_id, flags);
}
EXPORT_SYMBOL(ath10k_ce_send_nolock);

void __ath10k_ce_send_revert(struct ath10k_ce_pipe *pipe)
{
	struct ath10k *ar = pipe->ar;
	struct ath10k_ce *ce = ath10k_ce_priv(ar);
	struct ath10k_ce_ring *src_ring = pipe->src_ring;
	u32 ctrl_addr = pipe->ctrl_addr;

	lockdep_assert_held(&ce->ce_lock);

	/*
	 * This function must be called only if there is an incomplete
	 * scatter-gather transfer (before the index register is updated)
	 * that needs to be cleaned up.
	 */
	if (WARN_ON_ONCE(src_ring->write_index == src_ring->sw_index))
		return;

	if (WARN_ON_ONCE(src_ring->write_index ==
			 ath10k_ce_src_ring_write_index_get(ar, ctrl_addr)))
		return;

	src_ring->write_index--;
	src_ring->write_index &= src_ring->nentries_mask;

	src_ring->per_transfer_context[src_ring->write_index] = NULL;
}
EXPORT_SYMBOL(__ath10k_ce_send_revert);

int ath10k_ce_send(struct ath10k_ce_pipe *ce_state,
		   void *per_transfer_context,
		   dma_addr_t buffer,
		   unsigned int nbytes,
		   unsigned int transfer_id,
		   unsigned int flags)
{
	struct ath10k *ar = ce_state->ar;
	struct ath10k_ce *ce = ath10k_ce_priv(ar);
	int ret;

	spin_lock_bh(&ce->ce_lock);
	ret = ath10k_ce_send_nolock(ce_state, per_transfer_context,
				    buffer, nbytes, transfer_id, flags);
	spin_unlock_bh(&ce->ce_lock);

	return ret;
}
EXPORT_SYMBOL(ath10k_ce_send);

int ath10k_ce_num_free_src_entries(struct ath10k_ce_pipe *pipe)
{
	struct ath10k *ar = pipe->ar;
	struct ath10k_ce *ce = ath10k_ce_priv(ar);
	int delta;

	spin_lock_bh(&ce->ce_lock);
	delta = CE_RING_DELTA(pipe->src_ring->nentries_mask,
			      pipe->src_ring->write_index,
			      pipe->src_ring->sw_index - 1);
	spin_unlock_bh(&ce->ce_lock);

	return delta;
}
EXPORT_SYMBOL(ath10k_ce_num_free_src_entries);
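
/*
 * Illustrative caller-side sketch (the ce_example_* name is
 * hypothetical): a caller batching n sends can first check the free
 * ring space reported above to avoid -ENOSR from ath10k_ce_send().
 */
static inline bool __maybe_unused
ce_example_can_send(struct ath10k_ce_pipe *pipe, int n)
{
	return ath10k_ce_num_free_src_entries(pipe) >= n;
}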

int __ath10k_ce_rx_num_free_bufs(struct ath10k_ce_pipe *pipe)
{
	struct ath10k *ar = pipe->ar;
	struct ath10k_ce *ce = ath10k_ce_priv(ar);
	struct ath10k_ce_ring *dest_ring = pipe->dest_ring;
	unsigned int nentries_mask = dest_ring->nentries_mask;
	unsigned int write_index = dest_ring->write_index;
	unsigned int sw_index = dest_ring->sw_index;

	lockdep_assert_held(&ce->ce_lock);

	return CE_RING_DELTA(nentries_mask, write_index, sw_index - 1);
}
EXPORT_SYMBOL(__ath10k_ce_rx_num_free_bufs);

static int __ath10k_ce_rx_post_buf(struct ath10k_ce_pipe *pipe, void *ctx,
				   dma_addr_t paddr)
{
	struct ath10k *ar = pipe->ar;
	struct ath10k_ce *ce = ath10k_ce_priv(ar);
	struct ath10k_ce_ring *dest_ring = pipe->dest_ring;
	unsigned int nentries_mask = dest_ring->nentries_mask;
	unsigned int write_index = dest_ring->write_index;
	unsigned int sw_index = dest_ring->sw_index;
	struct ce_desc *base = dest_ring->base_addr_owner_space;
	struct ce_desc *desc = CE_DEST_RING_TO_DESC(base, write_index);
	u32 ctrl_addr = pipe->ctrl_addr;

	lockdep_assert_held(&ce->ce_lock);

	if ((pipe->id != 5) &&
	    CE_RING_DELTA(nentries_mask, write_index, sw_index - 1) == 0)
		return -ENOSPC;

	desc->addr = __cpu_to_le32(paddr);
	desc->nbytes = 0;

	dest_ring->per_transfer_context[write_index] = ctx;
	write_index = CE_RING_IDX_INCR(nentries_mask, write_index);
	ath10k_ce_dest_ring_write_index_set(ar, ctrl_addr, write_index);
	dest_ring->write_index = write_index;

	return 0;
}

static int __ath10k_ce_rx_post_buf_64(struct ath10k_ce_pipe *pipe,
				      void *ctx,
				      dma_addr_t paddr)
{
	struct ath10k *ar = pipe->ar;
	struct ath10k_ce *ce = ath10k_ce_priv(ar);
	struct ath10k_ce_ring *dest_ring = pipe->dest_ring;
	unsigned int nentries_mask = dest_ring->nentries_mask;
	unsigned int write_index = dest_ring->write_index;
	unsigned int sw_index = dest_ring->sw_index;
	struct ce_desc_64 *base = dest_ring->base_addr_owner_space;
	struct ce_desc_64 *desc =
			CE_DEST_RING_TO_DESC_64(base, write_index);
	u32 ctrl_addr = pipe->ctrl_addr;

	lockdep_assert_held(&ce->ce_lock);

	if (CE_RING_DELTA(nentries_mask, write_index, sw_index - 1) == 0)
		return -ENOSPC;

	desc->addr = __cpu_to_le64(paddr);
	desc->addr &= __cpu_to_le64(CE_DESC_ADDR_MASK);

	desc->nbytes = 0;

	dest_ring->per_transfer_context[write_index] = ctx;
	write_index = CE_RING_IDX_INCR(nentries_mask, write_index);
	ath10k_ce_dest_ring_write_index_set(ar, ctrl_addr, write_index);
	dest_ring->write_index = write_index;

	return 0;
}

void ath10k_ce_rx_update_write_idx(struct ath10k_ce_pipe *pipe, u32 nentries)
{
	struct ath10k *ar = pipe->ar;
	struct ath10k_ce_ring *dest_ring = pipe->dest_ring;
	unsigned int nentries_mask = dest_ring->nentries_mask;
	unsigned int write_index = dest_ring->write_index;
	u32 ctrl_addr = pipe->ctrl_addr;
	u32 cur_write_idx = ath10k_ce_dest_ring_write_index_get(ar, ctrl_addr);

	/* Prevent CE ring stuck issue that will occur when ring is full.
	 * Make sure that write index is 1 less than read index.
	 */
	if (((cur_write_idx + nentries) & nentries_mask) == dest_ring->sw_index)
		nentries -= 1;

	write_index = CE_RING_IDX_ADD(nentries_mask, write_index, nentries);
	ath10k_ce_dest_ring_write_index_set(ar, ctrl_addr, write_index);
	dest_ring->write_index = write_index;
}
EXPORT_SYMBOL(ath10k_ce_rx_update_write_idx);
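
/*
 * Example of the full-ring guard above, assuming an 8-entry ring
 * (mask 0x7): with sw_index == 2 and cur_write_idx == 6, posting
 * nentries == 4 would make (6 + 4) & 0x7 == 2 == sw_index, i.e. the
 * write index would collide with the read index and the hardware would
 * treat the ring as empty; dropping one entry keeps "full" and "empty"
 * distinguishable.
 */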

int ath10k_ce_rx_post_buf(struct ath10k_ce_pipe *pipe, void *ctx,
			  dma_addr_t paddr)
{
	struct ath10k *ar = pipe->ar;
	struct ath10k_ce *ce = ath10k_ce_priv(ar);
	int ret;

	spin_lock_bh(&ce->ce_lock);
	ret = pipe->ops->ce_rx_post_buf(pipe, ctx, paddr);
	spin_unlock_bh(&ce->ce_lock);

	return ret;
}
EXPORT_SYMBOL(ath10k_ce_rx_post_buf);

/*
 * Guts of ath10k_ce_completed_recv_next.
 * The caller takes responsibility for any necessary locking.
 */
static int
_ath10k_ce_completed_recv_next_nolock(struct ath10k_ce_pipe *ce_state,
				      void **per_transfer_contextp,
				      unsigned int *nbytesp)
{
	struct ath10k_ce_ring *dest_ring = ce_state->dest_ring;
	unsigned int nentries_mask = dest_ring->nentries_mask;
	unsigned int sw_index = dest_ring->sw_index;

	struct ce_desc *base = dest_ring->base_addr_owner_space;
	struct ce_desc *desc = CE_DEST_RING_TO_DESC(base, sw_index);
	struct ce_desc sdesc;
	u16 nbytes;

	/* Copy in one go for performance reasons */
	sdesc = *desc;

	nbytes = __le16_to_cpu(sdesc.nbytes);
	if (nbytes == 0) {
		/*
		 * This closes a relatively unusual race where the Host
		 * sees the updated DRRI before the update to the
		 * corresponding descriptor has completed. We treat this
		 * as a descriptor that is not yet done.
		 */
		return -EIO;
	}

	desc->nbytes = 0;

	/* Return data from completed destination descriptor */
	*nbytesp = nbytes;

	if (per_transfer_contextp)
		*per_transfer_contextp =
			dest_ring->per_transfer_context[sw_index];

	/* Copy engine 5 (HTT Rx) will reuse the same transfer context.
	 * So update the transfer context for all CEs except CE5.
	 */
	if (ce_state->id != 5)
		dest_ring->per_transfer_context[sw_index] = NULL;

	/* Update sw_index */
	sw_index = CE_RING_IDX_INCR(nentries_mask, sw_index);
	dest_ring->sw_index = sw_index;

	return 0;
}

static int
_ath10k_ce_completed_recv_next_nolock_64(struct ath10k_ce_pipe *ce_state,
					 void **per_transfer_contextp,
					 unsigned int *nbytesp)
{
	struct ath10k_ce_ring *dest_ring = ce_state->dest_ring;
	unsigned int nentries_mask = dest_ring->nentries_mask;
	unsigned int sw_index = dest_ring->sw_index;
	struct ce_desc_64 *base = dest_ring->base_addr_owner_space;
	struct ce_desc_64 *desc =
		CE_DEST_RING_TO_DESC_64(base, sw_index);
	struct ce_desc_64 sdesc;
	u16 nbytes;

	/* Copy in one go for performance reasons */
	sdesc = *desc;

	nbytes = __le16_to_cpu(sdesc.nbytes);
	if (nbytes == 0) {
		/* This closes a relatively unusual race where the Host
		 * sees the updated DRRI before the update to the
		 * corresponding descriptor has completed. We treat this
		 * as a descriptor that is not yet done.
		 */
		return -EIO;
	}

	desc->nbytes = 0;

	/* Return data from completed destination descriptor */
	*nbytesp = nbytes;

	if (per_transfer_contextp)
		*per_transfer_contextp =
			dest_ring->per_transfer_context[sw_index];

	/* Copy engine 5 (HTT Rx) will reuse the same transfer context.
	 * So update the transfer context for all CEs except CE5.
	 */
842da8fa4e3SBjoern A. Zeeb if (ce_state->id != 5)
843da8fa4e3SBjoern A. Zeeb dest_ring->per_transfer_context[sw_index] = NULL;
844da8fa4e3SBjoern A. Zeeb
845da8fa4e3SBjoern A. Zeeb /* Update sw_index */
846da8fa4e3SBjoern A. Zeeb sw_index = CE_RING_IDX_INCR(nentries_mask, sw_index);
847da8fa4e3SBjoern A. Zeeb dest_ring->sw_index = sw_index;
848da8fa4e3SBjoern A. Zeeb
849da8fa4e3SBjoern A. Zeeb return 0;
850da8fa4e3SBjoern A. Zeeb }
851da8fa4e3SBjoern A. Zeeb
ath10k_ce_completed_recv_next_nolock(struct ath10k_ce_pipe * ce_state,void ** per_transfer_ctx,unsigned int * nbytesp)852da8fa4e3SBjoern A. Zeeb int ath10k_ce_completed_recv_next_nolock(struct ath10k_ce_pipe *ce_state,
853da8fa4e3SBjoern A. Zeeb void **per_transfer_ctx,
854da8fa4e3SBjoern A. Zeeb unsigned int *nbytesp)
855da8fa4e3SBjoern A. Zeeb {
856da8fa4e3SBjoern A. Zeeb return ce_state->ops->ce_completed_recv_next_nolock(ce_state,
857da8fa4e3SBjoern A. Zeeb per_transfer_ctx,
858da8fa4e3SBjoern A. Zeeb nbytesp);
859da8fa4e3SBjoern A. Zeeb }
860da8fa4e3SBjoern A. Zeeb EXPORT_SYMBOL(ath10k_ce_completed_recv_next_nolock);
861da8fa4e3SBjoern A. Zeeb
ath10k_ce_completed_recv_next(struct ath10k_ce_pipe * ce_state,void ** per_transfer_contextp,unsigned int * nbytesp)862da8fa4e3SBjoern A. Zeeb int ath10k_ce_completed_recv_next(struct ath10k_ce_pipe *ce_state,
863da8fa4e3SBjoern A. Zeeb void **per_transfer_contextp,
864da8fa4e3SBjoern A. Zeeb unsigned int *nbytesp)
865da8fa4e3SBjoern A. Zeeb {
866da8fa4e3SBjoern A. Zeeb struct ath10k *ar = ce_state->ar;
867da8fa4e3SBjoern A. Zeeb struct ath10k_ce *ce = ath10k_ce_priv(ar);
868da8fa4e3SBjoern A. Zeeb int ret;
869da8fa4e3SBjoern A. Zeeb
870da8fa4e3SBjoern A. Zeeb spin_lock_bh(&ce->ce_lock);
871da8fa4e3SBjoern A. Zeeb ret = ce_state->ops->ce_completed_recv_next_nolock(ce_state,
872da8fa4e3SBjoern A. Zeeb per_transfer_contextp,
873da8fa4e3SBjoern A. Zeeb nbytesp);
874da8fa4e3SBjoern A. Zeeb
875da8fa4e3SBjoern A. Zeeb spin_unlock_bh(&ce->ce_lock);
876da8fa4e3SBjoern A. Zeeb
877da8fa4e3SBjoern A. Zeeb return ret;
878da8fa4e3SBjoern A. Zeeb }
879da8fa4e3SBjoern A. Zeeb EXPORT_SYMBOL(ath10k_ce_completed_recv_next);
880da8fa4e3SBjoern A. Zeeb
_ath10k_ce_revoke_recv_next(struct ath10k_ce_pipe * ce_state,void ** per_transfer_contextp,dma_addr_t * bufferp)881da8fa4e3SBjoern A. Zeeb static int _ath10k_ce_revoke_recv_next(struct ath10k_ce_pipe *ce_state,
882da8fa4e3SBjoern A. Zeeb void **per_transfer_contextp,
883da8fa4e3SBjoern A. Zeeb dma_addr_t *bufferp)
884da8fa4e3SBjoern A. Zeeb {
885da8fa4e3SBjoern A. Zeeb struct ath10k_ce_ring *dest_ring;
886da8fa4e3SBjoern A. Zeeb unsigned int nentries_mask;
887da8fa4e3SBjoern A. Zeeb unsigned int sw_index;
888da8fa4e3SBjoern A. Zeeb unsigned int write_index;
889da8fa4e3SBjoern A. Zeeb int ret;
890da8fa4e3SBjoern A. Zeeb struct ath10k *ar;
891da8fa4e3SBjoern A. Zeeb struct ath10k_ce *ce;
892da8fa4e3SBjoern A. Zeeb
893da8fa4e3SBjoern A. Zeeb dest_ring = ce_state->dest_ring;
894da8fa4e3SBjoern A. Zeeb
895da8fa4e3SBjoern A. Zeeb if (!dest_ring)
896da8fa4e3SBjoern A. Zeeb return -EIO;
897da8fa4e3SBjoern A. Zeeb
898da8fa4e3SBjoern A. Zeeb ar = ce_state->ar;
899da8fa4e3SBjoern A. Zeeb ce = ath10k_ce_priv(ar);
900da8fa4e3SBjoern A. Zeeb
901da8fa4e3SBjoern A. Zeeb spin_lock_bh(&ce->ce_lock);
902da8fa4e3SBjoern A. Zeeb
903da8fa4e3SBjoern A. Zeeb nentries_mask = dest_ring->nentries_mask;
904da8fa4e3SBjoern A. Zeeb sw_index = dest_ring->sw_index;
905da8fa4e3SBjoern A. Zeeb write_index = dest_ring->write_index;
906da8fa4e3SBjoern A. Zeeb if (write_index != sw_index) {
907da8fa4e3SBjoern A. Zeeb struct ce_desc *base = dest_ring->base_addr_owner_space;
908da8fa4e3SBjoern A. Zeeb struct ce_desc *desc = CE_DEST_RING_TO_DESC(base, sw_index);
909da8fa4e3SBjoern A. Zeeb
910da8fa4e3SBjoern A. Zeeb /* Return data from completed destination descriptor */
911da8fa4e3SBjoern A. Zeeb *bufferp = __le32_to_cpu(desc->addr);
912da8fa4e3SBjoern A. Zeeb
		if (per_transfer_contextp)
			*per_transfer_contextp =
				dest_ring->per_transfer_context[sw_index];

		/* sanity */
		dest_ring->per_transfer_context[sw_index] = NULL;
		desc->nbytes = 0;

		/* Update sw_index */
		sw_index = CE_RING_IDX_INCR(nentries_mask, sw_index);
		dest_ring->sw_index = sw_index;
		ret = 0;
	} else {
		ret = -EIO;
	}

	spin_unlock_bh(&ce->ce_lock);

	return ret;
}

static int _ath10k_ce_revoke_recv_next_64(struct ath10k_ce_pipe *ce_state,
					  void **per_transfer_contextp,
					  dma_addr_t *bufferp)
{
	struct ath10k_ce_ring *dest_ring;
	unsigned int nentries_mask;
	unsigned int sw_index;
	unsigned int write_index;
	int ret;
	struct ath10k *ar;
	struct ath10k_ce *ce;

	dest_ring = ce_state->dest_ring;

	if (!dest_ring)
		return -EIO;

	ar = ce_state->ar;
	ce = ath10k_ce_priv(ar);

	spin_lock_bh(&ce->ce_lock);

	nentries_mask = dest_ring->nentries_mask;
	sw_index = dest_ring->sw_index;
	write_index = dest_ring->write_index;
	if (write_index != sw_index) {
		struct ce_desc_64 *base = dest_ring->base_addr_owner_space;
		struct ce_desc_64 *desc =
			CE_DEST_RING_TO_DESC_64(base, sw_index);

		/* Return data from completed destination descriptor */
		*bufferp = __le64_to_cpu(desc->addr);

		if (per_transfer_contextp)
			*per_transfer_contextp =
				dest_ring->per_transfer_context[sw_index];

		/* sanity */
		dest_ring->per_transfer_context[sw_index] = NULL;
		desc->nbytes = 0;

		/* Update sw_index */
		sw_index = CE_RING_IDX_INCR(nentries_mask, sw_index);
		dest_ring->sw_index = sw_index;
		ret = 0;
	} else {
		ret = -EIO;
	}

	spin_unlock_bh(&ce->ce_lock);

	return ret;
}

int ath10k_ce_revoke_recv_next(struct ath10k_ce_pipe *ce_state,
			       void **per_transfer_contextp,
			       dma_addr_t *bufferp)
{
	return ce_state->ops->ce_revoke_recv_next(ce_state,
						  per_transfer_contextp,
						  bufferp);
}
EXPORT_SYMBOL(ath10k_ce_revoke_recv_next);
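
/*
 * Usage sketch (illustrative only, not driver code): a teardown path
 * would typically drain a destination ring by revoking entries until
 * -EIO signals the ring is empty, unmapping each posted buffer. The
 * sk_buff context and mapping size below are assumptions about how the
 * buffers were originally posted.
 *
 *	void *ctx;
 *	dma_addr_t paddr;
 *
 *	while (ath10k_ce_revoke_recv_next(ce_pipe, &ctx, &paddr) == 0) {
 *		struct sk_buff *skb = ctx;
 *
 *		dma_unmap_single(ar->dev, paddr,
 *				 skb->len + skb_tailroom(skb),
 *				 DMA_FROM_DEVICE);
 *		dev_kfree_skb_any(skb);
 *	}
 */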

/*
 * Guts of ath10k_ce_completed_send_next.
 * The caller takes responsibility for any necessary locking.
 */
static int _ath10k_ce_completed_send_next_nolock(struct ath10k_ce_pipe *ce_state,
						 void **per_transfer_contextp)
{
	struct ath10k_ce_ring *src_ring = ce_state->src_ring;
	u32 ctrl_addr = ce_state->ctrl_addr;
	struct ath10k *ar = ce_state->ar;
	unsigned int nentries_mask = src_ring->nentries_mask;
	unsigned int sw_index = src_ring->sw_index;
	unsigned int read_index;
	struct ce_desc *desc;

	if (src_ring->hw_index == sw_index) {
		/*
		 * The SW completion index has caught up with the cached
		 * version of the HW completion index.
		 * Update the cached HW completion index to see whether
		 * the SW has really caught up to the HW, or if the cached
		 * value of the HW index has become stale.
		 */

		read_index = ath10k_ce_src_ring_read_index_get(ar, ctrl_addr);
		if (read_index == 0xffffffff)
			return -ENODEV;

		read_index &= nentries_mask;
		src_ring->hw_index = read_index;
	}

	if (ar->hw_params.rri_on_ddr)
		read_index = ath10k_ce_src_ring_read_index_get(ar, ctrl_addr);
	else
		read_index = src_ring->hw_index;

	if (read_index == sw_index)
		return -EIO;

	if (per_transfer_contextp)
		*per_transfer_contextp =
			src_ring->per_transfer_context[sw_index];

	/* sanity */
	src_ring->per_transfer_context[sw_index] = NULL;
	desc = CE_SRC_RING_TO_DESC(src_ring->base_addr_owner_space,
				   sw_index);
	desc->nbytes = 0;

	/* Update sw_index */
	sw_index = CE_RING_IDX_INCR(nentries_mask, sw_index);
	src_ring->sw_index = sw_index;

	return 0;
}

static int _ath10k_ce_completed_send_next_nolock_64(struct ath10k_ce_pipe *ce_state,
						    void **per_transfer_contextp)
{
	struct ath10k_ce_ring *src_ring = ce_state->src_ring;
	u32 ctrl_addr = ce_state->ctrl_addr;
	struct ath10k *ar = ce_state->ar;
	unsigned int nentries_mask = src_ring->nentries_mask;
	unsigned int sw_index = src_ring->sw_index;
	unsigned int read_index;
	struct ce_desc_64 *desc;

	if (src_ring->hw_index == sw_index) {
		/*
		 * The SW completion index has caught up with the cached
		 * version of the HW completion index.
		 * Update the cached HW completion index to see whether
		 * the SW has really caught up to the HW, or if the cached
		 * value of the HW index has become stale.
		 */

		read_index = ath10k_ce_src_ring_read_index_get(ar, ctrl_addr);
		if (read_index == 0xffffffff)
			return -ENODEV;

		read_index &= nentries_mask;
		src_ring->hw_index = read_index;
	}

	if (ar->hw_params.rri_on_ddr)
		read_index = ath10k_ce_src_ring_read_index_get(ar, ctrl_addr);
	else
		read_index = src_ring->hw_index;

	if (read_index == sw_index)
		return -EIO;

	if (per_transfer_contextp)
		*per_transfer_contextp =
			src_ring->per_transfer_context[sw_index];

	/* sanity */
	src_ring->per_transfer_context[sw_index] = NULL;
	desc = CE_SRC_RING_TO_DESC_64(src_ring->base_addr_owner_space,
				      sw_index);
	desc->nbytes = 0;

	/* Update sw_index */
	sw_index = CE_RING_IDX_INCR(nentries_mask, sw_index);
	src_ring->sw_index = sw_index;

	return 0;
}

int ath10k_ce_completed_send_next_nolock(struct ath10k_ce_pipe *ce_state,
					 void **per_transfer_contextp)
{
	return ce_state->ops->ce_completed_send_next_nolock(ce_state,
							    per_transfer_contextp);
}
EXPORT_SYMBOL(ath10k_ce_completed_send_next_nolock);
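
/*
 * Usage sketch (illustrative, not driver code): the _nolock variant
 * must run under ce->ce_lock, exactly as the locked wrapper
 * ath10k_ce_completed_send_next() further below does;
 * process_send_done() is a hypothetical per-transfer handler.
 *
 *	struct ath10k_ce *ce = ath10k_ce_priv(ar);
 *	void *ctx;
 *
 *	spin_lock_bh(&ce->ce_lock);
 *	while (ath10k_ce_completed_send_next_nolock(ce_pipe, &ctx) == 0)
 *		process_send_done(ctx);
 *	spin_unlock_bh(&ce->ce_lock);
 */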

static void ath10k_ce_extract_desc_data(struct ath10k *ar,
					struct ath10k_ce_ring *src_ring,
					u32 sw_index,
					dma_addr_t *bufferp,
					u32 *nbytesp,
					u32 *transfer_idp)
{
	struct ce_desc *base = src_ring->base_addr_owner_space;
	struct ce_desc *desc = CE_SRC_RING_TO_DESC(base, sw_index);

	/* Return data from completed source descriptor */
	*bufferp = __le32_to_cpu(desc->addr);
	*nbytesp = __le16_to_cpu(desc->nbytes);
	*transfer_idp = MS(__le16_to_cpu(desc->flags),
			   CE_DESC_FLAGS_META_DATA);
}

static void ath10k_ce_extract_desc_data_64(struct ath10k *ar,
					   struct ath10k_ce_ring *src_ring,
					   u32 sw_index,
					   dma_addr_t *bufferp,
					   u32 *nbytesp,
					   u32 *transfer_idp)
{
	struct ce_desc_64 *base = src_ring->base_addr_owner_space;
	struct ce_desc_64 *desc =
		CE_SRC_RING_TO_DESC_64(base, sw_index);

	/* Return data from completed source descriptor */
	*bufferp = __le64_to_cpu(desc->addr);
	*nbytesp = __le16_to_cpu(desc->nbytes);
	*transfer_idp = MS(__le16_to_cpu(desc->flags),
			   CE_DESC_FLAGS_META_DATA);
}
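
/*
 * Note (stated as an assumption about the MS() helper): MS(value, FIELD)
 * is the driver's mask-and-shift macro, conceptually
 *
 *	(((value) & FIELD##_MASK) >> FIELD##_LSB)
 *
 * so the transfer ID extracted above is simply the
 * CE_DESC_FLAGS_META_DATA bitfield carved out of the descriptor flags
 * word.
 */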

/* NB: Modeled after ath10k_ce_completed_send_next */
int ath10k_ce_cancel_send_next(struct ath10k_ce_pipe *ce_state,
			       void **per_transfer_contextp,
			       dma_addr_t *bufferp,
			       unsigned int *nbytesp,
			       unsigned int *transfer_idp)
{
	struct ath10k_ce_ring *src_ring;
	unsigned int nentries_mask;
	unsigned int sw_index;
	unsigned int write_index;
	int ret;
	struct ath10k *ar;
	struct ath10k_ce *ce;

	src_ring = ce_state->src_ring;

	if (!src_ring)
		return -EIO;

	ar = ce_state->ar;
	ce = ath10k_ce_priv(ar);

	spin_lock_bh(&ce->ce_lock);

	nentries_mask = src_ring->nentries_mask;
	sw_index = src_ring->sw_index;
	write_index = src_ring->write_index;

	if (write_index != sw_index) {
		ce_state->ops->ce_extract_desc_data(ar, src_ring, sw_index,
						    bufferp, nbytesp,
						    transfer_idp);

		if (per_transfer_contextp)
			*per_transfer_contextp =
				src_ring->per_transfer_context[sw_index];

		/* sanity */
		src_ring->per_transfer_context[sw_index] = NULL;

		/* Update sw_index */
		sw_index = CE_RING_IDX_INCR(nentries_mask, sw_index);
		src_ring->sw_index = sw_index;
		ret = 0;
	} else {
		ret = -EIO;
	}

	spin_unlock_bh(&ce->ce_lock);

	return ret;
}
EXPORT_SYMBOL(ath10k_ce_cancel_send_next);
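
/*
 * Usage sketch (illustrative, not driver code): a TX-pipe flush would
 * typically cancel descriptors the target never consumed and complete
 * them back to the upper layer. The completion helper is hypothetical,
 * and the context type is an assumption about what the caller enqueued.
 *
 *	void *ctx;
 *	dma_addr_t paddr;
 *	unsigned int nbytes, id;
 *
 *	while (ath10k_ce_cancel_send_next(ce_pipe, &ctx, &paddr,
 *					  &nbytes, &id) == 0) {
 *		if (ctx)
 *			complete_cancelled_send(ctx);
 *	}
 */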

int ath10k_ce_completed_send_next(struct ath10k_ce_pipe *ce_state,
				  void **per_transfer_contextp)
{
	struct ath10k *ar = ce_state->ar;
	struct ath10k_ce *ce = ath10k_ce_priv(ar);
	int ret;

	spin_lock_bh(&ce->ce_lock);
	ret = ath10k_ce_completed_send_next_nolock(ce_state,
						   per_transfer_contextp);
	spin_unlock_bh(&ce->ce_lock);

	return ret;
}
EXPORT_SYMBOL(ath10k_ce_completed_send_next);

/*
 * Guts of interrupt handler for per-engine interrupts on a particular CE.
 *
 * Invokes registered callbacks for recv_complete,
 * send_complete, and watermarks.
 */
void ath10k_ce_per_engine_service(struct ath10k *ar, unsigned int ce_id)
{
	struct ath10k_ce *ce = ath10k_ce_priv(ar);
	struct ath10k_ce_pipe *ce_state = &ce->ce_states[ce_id];
	struct ath10k_hw_ce_host_wm_regs *wm_regs = ar->hw_ce_regs->wm_regs;
	u32 ctrl_addr = ce_state->ctrl_addr;

	/*
	 * Clear before handling
	 *
	 * Misc CE interrupts are not being handled, but still need
	 * to be cleared.
	 *
	 * NOTE: When the last copy engine interrupt is cleared the
	 * hardware will go to sleep. Once this happens any access to
	 * the CE registers can cause a hardware fault.
	 */
	ath10k_ce_engine_int_status_clear(ar, ctrl_addr,
					  wm_regs->cc_mask | wm_regs->wm_mask);

	if (ce_state->recv_cb)
		ce_state->recv_cb(ce_state);

	if (ce_state->send_cb)
		ce_state->send_cb(ce_state);
}
EXPORT_SYMBOL(ath10k_ce_per_engine_service);
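
/*
 * Illustrative interrupt flow (a sketch under the assumption of per-CE
 * interrupt vectors, not actual driver code): a bus backend could
 * dispatch each vector straight to the service routine above.
 *
 *	static irqreturn_t ce_isr(int irq, void *arg)
 *	{
 *		struct ath10k_ce_pipe *pipe = arg;
 *
 *		ath10k_ce_per_engine_service(pipe->ar, pipe->id);
 *		return IRQ_HANDLED;
 *	}
 */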

/*
 * Handler for per-engine interrupts on ALL active CEs.
 * This is used in cases where the system is sharing a
 * single interrupt for all CEs.
 */

void ath10k_ce_per_engine_service_any(struct ath10k *ar)
{
	int ce_id;
	u32 intr_summary;

	intr_summary = ath10k_ce_interrupt_summary(ar);

	for (ce_id = 0; intr_summary && (ce_id < CE_COUNT); ce_id++) {
		if (intr_summary & (1 << ce_id))
			intr_summary &= ~(1 << ce_id);
		else
			/* no intr pending on this CE */
			continue;

		ath10k_ce_per_engine_service(ar, ce_id);
	}
}
EXPORT_SYMBOL(ath10k_ce_per_engine_service_any);
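
/*
 * Worked example for the summary loop above: a summary of 0x5 (bits 0
 * and 2 set) services CE0 and CE2 only; each bit is cleared from the
 * local copy as its engine is serviced, so the loop terminates as soon
 * as intr_summary reaches zero instead of always scanning all CE_COUNT
 * engines.
 */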

/*
 * Adjust interrupts for the copy complete handler.
 * If it's needed for either send or recv, then unmask
 * this interrupt; otherwise, mask it.
 *
 * Called with ce_lock held.
 */
static void ath10k_ce_per_engine_handler_adjust(struct ath10k_ce_pipe *ce_state)
{
	u32 ctrl_addr = ce_state->ctrl_addr;
	struct ath10k *ar = ce_state->ar;
	bool disable_copy_compl_intr = ce_state->attr_flags & CE_ATTR_DIS_INTR;

	if ((!disable_copy_compl_intr) &&
	    (ce_state->send_cb || ce_state->recv_cb))
		ath10k_ce_copy_complete_inter_enable(ar, ctrl_addr);
	else
		ath10k_ce_copy_complete_intr_disable(ar, ctrl_addr);

	ath10k_ce_watermark_intr_disable(ar, ctrl_addr);
}

void ath10k_ce_disable_interrupt(struct ath10k *ar, int ce_id)
{
	struct ath10k_ce *ce = ath10k_ce_priv(ar);
	struct ath10k_ce_pipe *ce_state;
	u32 ctrl_addr;

	ce_state = &ce->ce_states[ce_id];
	if (ce_state->attr_flags & CE_ATTR_POLL)
		return;

	ctrl_addr = ath10k_ce_base_address(ar, ce_id);

	ath10k_ce_copy_complete_intr_disable(ar, ctrl_addr);
	ath10k_ce_error_intr_disable(ar, ctrl_addr);
	ath10k_ce_watermark_intr_disable(ar, ctrl_addr);
}
EXPORT_SYMBOL(ath10k_ce_disable_interrupt);

void ath10k_ce_disable_interrupts(struct ath10k *ar)
{
	int ce_id;

	for (ce_id = 0; ce_id < CE_COUNT; ce_id++)
		ath10k_ce_disable_interrupt(ar, ce_id);
}
EXPORT_SYMBOL(ath10k_ce_disable_interrupts);

void ath10k_ce_enable_interrupt(struct ath10k *ar, int ce_id)
{
	struct ath10k_ce *ce = ath10k_ce_priv(ar);
	struct ath10k_ce_pipe *ce_state;

	ce_state = &ce->ce_states[ce_id];
	if (ce_state->attr_flags & CE_ATTR_POLL)
		return;

	ath10k_ce_per_engine_handler_adjust(ce_state);
}
EXPORT_SYMBOL(ath10k_ce_enable_interrupt);

void ath10k_ce_enable_interrupts(struct ath10k *ar)
{
	int ce_id;

	/* Enable interrupts for copy engines that
	 * are not using polling mode.
	 */
	for (ce_id = 0; ce_id < CE_COUNT; ce_id++)
		ath10k_ce_enable_interrupt(ar, ce_id);
}
EXPORT_SYMBOL(ath10k_ce_enable_interrupts);

static int ath10k_ce_init_src_ring(struct ath10k *ar,
				   unsigned int ce_id,
				   const struct ce_attr *attr)
{
	struct ath10k_ce *ce = ath10k_ce_priv(ar);
	struct ath10k_ce_pipe *ce_state = &ce->ce_states[ce_id];
	struct ath10k_ce_ring *src_ring = ce_state->src_ring;
	u32 nentries, ctrl_addr = ath10k_ce_base_address(ar, ce_id);

	nentries = roundup_pow_of_two(attr->src_nentries);

	if (ar->hw_params.target_64bit)
		memset(src_ring->base_addr_owner_space, 0,
		       nentries * sizeof(struct ce_desc_64));
	else
		memset(src_ring->base_addr_owner_space, 0,
		       nentries * sizeof(struct ce_desc));

	src_ring->sw_index = ath10k_ce_src_ring_read_index_get(ar, ctrl_addr);
	src_ring->sw_index &= src_ring->nentries_mask;
	src_ring->hw_index = src_ring->sw_index;

	src_ring->write_index =
		ath10k_ce_src_ring_write_index_get(ar, ctrl_addr);
	src_ring->write_index &= src_ring->nentries_mask;

	ath10k_ce_src_ring_base_addr_set(ar, ce_id,
					 src_ring->base_addr_ce_space);
	ath10k_ce_src_ring_size_set(ar, ctrl_addr, nentries);
	ath10k_ce_src_ring_dmax_set(ar, ctrl_addr, attr->src_sz_max);
	ath10k_ce_src_ring_byte_swap_set(ar, ctrl_addr, 0);
	ath10k_ce_src_ring_lowmark_set(ar, ctrl_addr, 0);
	ath10k_ce_src_ring_highmark_set(ar, ctrl_addr, nentries);

	ath10k_dbg(ar, ATH10K_DBG_BOOT,
		   "boot init ce src ring id %d entries %d base_addr %pK\n",
		   ce_id, nentries, src_ring->base_addr_owner_space);

	return 0;
}

static int ath10k_ce_init_dest_ring(struct ath10k *ar,
				    unsigned int ce_id,
				    const struct ce_attr *attr)
{
	struct ath10k_ce *ce = ath10k_ce_priv(ar);
	struct ath10k_ce_pipe *ce_state = &ce->ce_states[ce_id];
	struct ath10k_ce_ring *dest_ring = ce_state->dest_ring;
	u32 nentries, ctrl_addr = ath10k_ce_base_address(ar, ce_id);

	nentries = roundup_pow_of_two(attr->dest_nentries);

	if (ar->hw_params.target_64bit)
		memset(dest_ring->base_addr_owner_space, 0,
		       nentries * sizeof(struct ce_desc_64));
	else
		memset(dest_ring->base_addr_owner_space, 0,
		       nentries * sizeof(struct ce_desc));

	dest_ring->sw_index = ath10k_ce_dest_ring_read_index_get(ar, ctrl_addr);
	dest_ring->sw_index &= dest_ring->nentries_mask;
	dest_ring->write_index =
		ath10k_ce_dest_ring_write_index_get(ar, ctrl_addr);
	dest_ring->write_index &= dest_ring->nentries_mask;

	ath10k_ce_dest_ring_base_addr_set(ar, ce_id,
					  dest_ring->base_addr_ce_space);
	ath10k_ce_dest_ring_size_set(ar, ctrl_addr, nentries);
	ath10k_ce_dest_ring_byte_swap_set(ar, ctrl_addr, 0);
	ath10k_ce_dest_ring_lowmark_set(ar, ctrl_addr, 0);
	ath10k_ce_dest_ring_highmark_set(ar, ctrl_addr, nentries);

	ath10k_dbg(ar, ATH10K_DBG_BOOT,
		   "boot ce dest ring id %d entries %d base_addr %pK\n",
		   ce_id, nentries, dest_ring->base_addr_owner_space);

	return 0;
}

static int ath10k_ce_alloc_shadow_base(struct ath10k *ar,
				       struct ath10k_ce_ring *src_ring,
				       u32 nentries)
{
	src_ring->shadow_base_unaligned = kcalloc(nentries,
						  sizeof(struct ce_desc_64),
						  GFP_KERNEL);
	if (!src_ring->shadow_base_unaligned)
		return -ENOMEM;

	src_ring->shadow_base = (struct ce_desc_64 *)
			PTR_ALIGN(src_ring->shadow_base_unaligned,
				  CE_DESC_RING_ALIGN);
	return 0;
}
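
/*
 * Background note (stated as an assumption): on targets with
 * shadow_reg_support the driver stages source descriptors in this
 * plain-kernel-memory shadow ring rather than reading them back from
 * the hardware ring, which is why it needs no DMA mapping of its own;
 * PTR_ALIGN() merely rounds the base up to CE_DESC_RING_ALIGN, matching
 * the DMA rings below.
 */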

static struct ath10k_ce_ring *
ath10k_ce_alloc_src_ring(struct ath10k *ar, unsigned int ce_id,
			 const struct ce_attr *attr)
{
	struct ath10k_ce_ring *src_ring;
	u32 nentries = attr->src_nentries;
	dma_addr_t base_addr;
	int ret;

	nentries = roundup_pow_of_two(nentries);

	src_ring = kzalloc(struct_size(src_ring, per_transfer_context,
				       nentries), GFP_KERNEL);
	if (src_ring == NULL)
		return ERR_PTR(-ENOMEM);

	src_ring->nentries = nentries;
	src_ring->nentries_mask = nentries - 1;

	/*
	 * Legacy platforms that do not support cache
	 * coherent DMA are unsupported
	 */
	src_ring->base_addr_owner_space_unaligned =
		dma_alloc_coherent(ar->dev,
				   (nentries * sizeof(struct ce_desc) +
				    CE_DESC_RING_ALIGN),
				   &base_addr, GFP_KERNEL);
	if (!src_ring->base_addr_owner_space_unaligned) {
		kfree(src_ring);
		return ERR_PTR(-ENOMEM);
	}

	src_ring->base_addr_ce_space_unaligned = base_addr;

	src_ring->base_addr_owner_space =
			PTR_ALIGN(src_ring->base_addr_owner_space_unaligned,
				  CE_DESC_RING_ALIGN);
	src_ring->base_addr_ce_space =
			ALIGN(src_ring->base_addr_ce_space_unaligned,
			      CE_DESC_RING_ALIGN);

	if (ar->hw_params.shadow_reg_support) {
		ret = ath10k_ce_alloc_shadow_base(ar, src_ring, nentries);
		if (ret) {
			dma_free_coherent(ar->dev,
					  (nentries * sizeof(struct ce_desc) +
					   CE_DESC_RING_ALIGN),
					  src_ring->base_addr_owner_space_unaligned,
					  base_addr);
			kfree(src_ring);
			return ERR_PTR(ret);
		}
	}

	return src_ring;
}
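
/*
 * The ring allocators in this file share one pattern: over-allocate by
 * CE_DESC_RING_ALIGN bytes, remember the unaligned CPU and bus
 * addresses for dma_free_coherent(), and publish PTR_ALIGN()/ALIGN()ed
 * copies so the hardware always sees an aligned descriptor base.
 * Worked example (assuming CE_DESC_RING_ALIGN is 8): ALIGN(0x1004, 8)
 * yields 0x1008, consuming at most 7 of the extra bytes.
 */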

static struct ath10k_ce_ring *
ath10k_ce_alloc_src_ring_64(struct ath10k *ar, unsigned int ce_id,
			    const struct ce_attr *attr)
{
	struct ath10k_ce_ring *src_ring;
	u32 nentries = attr->src_nentries;
	dma_addr_t base_addr;
	int ret;

	nentries = roundup_pow_of_two(nentries);

	src_ring = kzalloc(struct_size(src_ring, per_transfer_context,
				       nentries), GFP_KERNEL);
	if (!src_ring)
		return ERR_PTR(-ENOMEM);

	src_ring->nentries = nentries;
	src_ring->nentries_mask = nentries - 1;

	/* Legacy platforms that do not support cache
	 * coherent DMA are unsupported
	 */
	src_ring->base_addr_owner_space_unaligned =
		dma_alloc_coherent(ar->dev,
				   (nentries * sizeof(struct ce_desc_64) +
				    CE_DESC_RING_ALIGN),
				   &base_addr, GFP_KERNEL);
	if (!src_ring->base_addr_owner_space_unaligned) {
		kfree(src_ring);
		return ERR_PTR(-ENOMEM);
	}

	src_ring->base_addr_ce_space_unaligned = base_addr;

	src_ring->base_addr_owner_space =
			PTR_ALIGN(src_ring->base_addr_owner_space_unaligned,
				  CE_DESC_RING_ALIGN);
	src_ring->base_addr_ce_space =
			ALIGN(src_ring->base_addr_ce_space_unaligned,
			      CE_DESC_RING_ALIGN);

	if (ar->hw_params.shadow_reg_support) {
		ret = ath10k_ce_alloc_shadow_base(ar, src_ring, nentries);
		if (ret) {
			dma_free_coherent(ar->dev,
					  (nentries * sizeof(struct ce_desc_64) +
					   CE_DESC_RING_ALIGN),
					  src_ring->base_addr_owner_space_unaligned,
					  base_addr);
			kfree(src_ring);
			return ERR_PTR(ret);
		}
	}

	return src_ring;
}

static struct ath10k_ce_ring *
ath10k_ce_alloc_dest_ring(struct ath10k *ar, unsigned int ce_id,
			  const struct ce_attr *attr)
{
	struct ath10k_ce_ring *dest_ring;
	u32 nentries;
	dma_addr_t base_addr;

	nentries = roundup_pow_of_two(attr->dest_nentries);

	dest_ring = kzalloc(struct_size(dest_ring, per_transfer_context,
					nentries), GFP_KERNEL);
	if (dest_ring == NULL)
		return ERR_PTR(-ENOMEM);

	dest_ring->nentries = nentries;
	dest_ring->nentries_mask = nentries - 1;

	/*
	 * Legacy platforms that do not support cache
	 * coherent DMA are unsupported
	 */
	dest_ring->base_addr_owner_space_unaligned =
		dma_alloc_coherent(ar->dev,
				   (nentries * sizeof(struct ce_desc) +
				    CE_DESC_RING_ALIGN),
				   &base_addr, GFP_KERNEL);
	if (!dest_ring->base_addr_owner_space_unaligned) {
		kfree(dest_ring);
		return ERR_PTR(-ENOMEM);
	}

	dest_ring->base_addr_ce_space_unaligned = base_addr;

	dest_ring->base_addr_owner_space =
			PTR_ALIGN(dest_ring->base_addr_owner_space_unaligned,
				  CE_DESC_RING_ALIGN);
	dest_ring->base_addr_ce_space =
			ALIGN(dest_ring->base_addr_ce_space_unaligned,
			      CE_DESC_RING_ALIGN);

	return dest_ring;
}

static struct ath10k_ce_ring *
ath10k_ce_alloc_dest_ring_64(struct ath10k *ar, unsigned int ce_id,
			     const struct ce_attr *attr)
{
	struct ath10k_ce_ring *dest_ring;
	u32 nentries;
	dma_addr_t base_addr;

	nentries = roundup_pow_of_two(attr->dest_nentries);

	dest_ring = kzalloc(struct_size(dest_ring, per_transfer_context,
					nentries), GFP_KERNEL);
	if (!dest_ring)
		return ERR_PTR(-ENOMEM);

	dest_ring->nentries = nentries;
	dest_ring->nentries_mask = nentries - 1;

	/* Legacy platforms that do not support cache
	 * coherent DMA are unsupported
	 */
	dest_ring->base_addr_owner_space_unaligned =
		dma_alloc_coherent(ar->dev,
				   (nentries * sizeof(struct ce_desc_64) +
				    CE_DESC_RING_ALIGN),
				   &base_addr, GFP_KERNEL);
	if (!dest_ring->base_addr_owner_space_unaligned) {
		kfree(dest_ring);
		return ERR_PTR(-ENOMEM);
	}

	dest_ring->base_addr_ce_space_unaligned = base_addr;

	/* The ring memory must start out zeroed (dma_alloc_coherent()
	 * returns zeroed memory); garbage descriptor data here could
	 * otherwise crash the system during firmware download.
	 */
	dest_ring->base_addr_owner_space =
			PTR_ALIGN(dest_ring->base_addr_owner_space_unaligned,
				  CE_DESC_RING_ALIGN);
	dest_ring->base_addr_ce_space =
			ALIGN(dest_ring->base_addr_ce_space_unaligned,
			      CE_DESC_RING_ALIGN);

	return dest_ring;
}

/*
 * Initialize a Copy Engine based on caller-supplied attributes.
 * This may be called once to initialize both source and destination
 * rings or it may be called twice for separate source and destination
 * initialization. It may be that only one side or the other is
 * initialized by software/firmware.
 */
int ath10k_ce_init_pipe(struct ath10k *ar, unsigned int ce_id,
			const struct ce_attr *attr)
{
	int ret;

	if (attr->src_nentries) {
		ret = ath10k_ce_init_src_ring(ar, ce_id, attr);
		if (ret) {
			ath10k_err(ar, "Failed to initialize CE src ring for ID: %d (%d)\n",
				   ce_id, ret);
			return ret;
		}
	}

	if (attr->dest_nentries) {
		ret = ath10k_ce_init_dest_ring(ar, ce_id, attr);
		if (ret) {
			ath10k_err(ar, "Failed to initialize CE dest ring for ID: %d (%d)\n",
				   ce_id, ret);
			return ret;
		}
	}

	return 0;
}
EXPORT_SYMBOL(ath10k_ce_init_pipe);
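
/*
 * Bring-up sketch (illustrative; "host_ce_config" stands in for the bus
 * backend's real attribute table): pipes are typically allocated once
 * at probe time and re-initialized on every power-up, since
 * ath10k_ce_init_pipe() only programs registers over freshly cleared
 * rings.
 *
 *	for (i = 0; i < CE_COUNT; i++)
 *		if (ath10k_ce_alloc_pipe(ar, i, &host_ce_config[i]))
 *			goto err_free;
 *
 *	later, on each power-on:
 *
 *	for (i = 0; i < CE_COUNT; i++)
 *		if (ath10k_ce_init_pipe(ar, i, &host_ce_config[i]))
 *			goto err_down;
 */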

static void ath10k_ce_deinit_src_ring(struct ath10k *ar, unsigned int ce_id)
{
	u32 ctrl_addr = ath10k_ce_base_address(ar, ce_id);

	ath10k_ce_src_ring_base_addr_set(ar, ce_id, 0);
	ath10k_ce_src_ring_size_set(ar, ctrl_addr, 0);
	ath10k_ce_src_ring_dmax_set(ar, ctrl_addr, 0);
	ath10k_ce_src_ring_highmark_set(ar, ctrl_addr, 0);
}

static void ath10k_ce_deinit_dest_ring(struct ath10k *ar, unsigned int ce_id)
{
	u32 ctrl_addr = ath10k_ce_base_address(ar, ce_id);

	ath10k_ce_dest_ring_base_addr_set(ar, ce_id, 0);
	ath10k_ce_dest_ring_size_set(ar, ctrl_addr, 0);
	ath10k_ce_dest_ring_highmark_set(ar, ctrl_addr, 0);
}

void ath10k_ce_deinit_pipe(struct ath10k *ar, unsigned int ce_id)
{
	ath10k_ce_deinit_src_ring(ar, ce_id);
	ath10k_ce_deinit_dest_ring(ar, ce_id);
}
EXPORT_SYMBOL(ath10k_ce_deinit_pipe);

static void _ath10k_ce_free_pipe(struct ath10k *ar, int ce_id)
{
	struct ath10k_ce *ce = ath10k_ce_priv(ar);
	struct ath10k_ce_pipe *ce_state = &ce->ce_states[ce_id];

	if (ce_state->src_ring) {
		if (ar->hw_params.shadow_reg_support)
			kfree(ce_state->src_ring->shadow_base_unaligned);
		dma_free_coherent(ar->dev,
				  (ce_state->src_ring->nentries *
				   sizeof(struct ce_desc) +
				   CE_DESC_RING_ALIGN),
				  ce_state->src_ring->base_addr_owner_space,
				  ce_state->src_ring->base_addr_ce_space);
		kfree(ce_state->src_ring);
	}

	if (ce_state->dest_ring) {
		dma_free_coherent(ar->dev,
				  (ce_state->dest_ring->nentries *
				   sizeof(struct ce_desc) +
				   CE_DESC_RING_ALIGN),
				  ce_state->dest_ring->base_addr_owner_space,
				  ce_state->dest_ring->base_addr_ce_space);
		kfree(ce_state->dest_ring);
	}

	ce_state->src_ring = NULL;
	ce_state->dest_ring = NULL;
}

static void _ath10k_ce_free_pipe_64(struct ath10k *ar, int ce_id)
{
	struct ath10k_ce *ce = ath10k_ce_priv(ar);
	struct ath10k_ce_pipe *ce_state = &ce->ce_states[ce_id];

	if (ce_state->src_ring) {
		if (ar->hw_params.shadow_reg_support)
			kfree(ce_state->src_ring->shadow_base_unaligned);
		dma_free_coherent(ar->dev,
				  (ce_state->src_ring->nentries *
				   sizeof(struct ce_desc_64) +
				   CE_DESC_RING_ALIGN),
				  ce_state->src_ring->base_addr_owner_space,
				  ce_state->src_ring->base_addr_ce_space);
		kfree(ce_state->src_ring);
	}

	if (ce_state->dest_ring) {
		dma_free_coherent(ar->dev,
				  (ce_state->dest_ring->nentries *
				   sizeof(struct ce_desc_64) +
				   CE_DESC_RING_ALIGN),
				  ce_state->dest_ring->base_addr_owner_space,
				  ce_state->dest_ring->base_addr_ce_space);
		kfree(ce_state->dest_ring);
	}

	ce_state->src_ring = NULL;
	ce_state->dest_ring = NULL;
}

void ath10k_ce_free_pipe(struct ath10k *ar, int ce_id)
{
	struct ath10k_ce *ce = ath10k_ce_priv(ar);
	struct ath10k_ce_pipe *ce_state = &ce->ce_states[ce_id];

	ce_state->ops->ce_free_pipe(ar, ce_id);
}
EXPORT_SYMBOL(ath10k_ce_free_pipe);

void ath10k_ce_dump_registers(struct ath10k *ar,
			      struct ath10k_fw_crash_data *crash_data)
{
	struct ath10k_ce *ce = ath10k_ce_priv(ar);
	struct ath10k_ce_crash_data ce_data;
	u32 addr, id;

	lockdep_assert_held(&ar->dump_mutex);

	ath10k_err(ar, "Copy Engine register dump:\n");

	spin_lock_bh(&ce->ce_lock);
	for (id = 0; id < CE_COUNT; id++) {
		addr = ath10k_ce_base_address(ar, id);
		ce_data.base_addr = cpu_to_le32(addr);

		ce_data.src_wr_idx =
			cpu_to_le32(ath10k_ce_src_ring_write_index_get(ar, addr));
		ce_data.src_r_idx =
			cpu_to_le32(ath10k_ce_src_ring_read_index_get(ar, addr));
		ce_data.dst_wr_idx =
			cpu_to_le32(ath10k_ce_dest_ring_write_index_get(ar, addr));
		ce_data.dst_r_idx =
			cpu_to_le32(ath10k_ce_dest_ring_read_index_get(ar, addr));

		if (crash_data)
			crash_data->ce_crash_data[id] = ce_data;

#if defined(__linux__)
		ath10k_err(ar, "[%02d]: 0x%08x %3u %3u %3u %3u", id,
#elif defined(__FreeBSD__)
		ath10k_err(ar, "[%02d]: 0x%08x %3u %3u %3u %3u\n", id,
#endif
			   le32_to_cpu(ce_data.base_addr),
			   le32_to_cpu(ce_data.src_wr_idx),
			   le32_to_cpu(ce_data.src_r_idx),
			   le32_to_cpu(ce_data.dst_wr_idx),
			   le32_to_cpu(ce_data.dst_r_idx));
	}

	spin_unlock_bh(&ce->ce_lock);
}
EXPORT_SYMBOL(ath10k_ce_dump_registers);

static const struct ath10k_ce_ops ce_ops = {
	.ce_alloc_src_ring = ath10k_ce_alloc_src_ring,
	.ce_alloc_dst_ring = ath10k_ce_alloc_dest_ring,
	.ce_rx_post_buf = __ath10k_ce_rx_post_buf,
	.ce_completed_recv_next_nolock = _ath10k_ce_completed_recv_next_nolock,
	.ce_revoke_recv_next = _ath10k_ce_revoke_recv_next,
	.ce_extract_desc_data = ath10k_ce_extract_desc_data,
	.ce_free_pipe = _ath10k_ce_free_pipe,
	.ce_send_nolock = _ath10k_ce_send_nolock,
	.ce_set_src_ring_base_addr_hi = NULL,
	.ce_set_dest_ring_base_addr_hi = NULL,
	.ce_completed_send_next_nolock = _ath10k_ce_completed_send_next_nolock,
};

static const struct ath10k_ce_ops ce_64_ops = {
	.ce_alloc_src_ring = ath10k_ce_alloc_src_ring_64,
	.ce_alloc_dst_ring = ath10k_ce_alloc_dest_ring_64,
	.ce_rx_post_buf = __ath10k_ce_rx_post_buf_64,
	.ce_completed_recv_next_nolock =
		_ath10k_ce_completed_recv_next_nolock_64,
	.ce_revoke_recv_next = _ath10k_ce_revoke_recv_next_64,
	.ce_extract_desc_data = ath10k_ce_extract_desc_data_64,
	.ce_free_pipe = _ath10k_ce_free_pipe_64,
	.ce_send_nolock = _ath10k_ce_send_nolock_64,
	.ce_set_src_ring_base_addr_hi = ath10k_ce_set_src_ring_base_addr_hi,
	.ce_set_dest_ring_base_addr_hi = ath10k_ce_set_dest_ring_base_addr_hi,
	.ce_completed_send_next_nolock = _ath10k_ce_completed_send_next_nolock_64,
};

static void ath10k_ce_set_ops(struct ath10k *ar,
			      struct ath10k_ce_pipe *ce_state)
{
	switch (ar->hw_rev) {
	case ATH10K_HW_WCN3990:
		ce_state->ops = &ce_64_ops;
		break;
	default:
		ce_state->ops = &ce_ops;
		break;
	}
}

int ath10k_ce_alloc_pipe(struct ath10k *ar, int ce_id,
			 const struct ce_attr *attr)
{
	struct ath10k_ce *ce = ath10k_ce_priv(ar);
	struct ath10k_ce_pipe *ce_state = &ce->ce_states[ce_id];
	int ret;

	ath10k_ce_set_ops(ar, ce_state);
	/* Make sure there's enough CE ringbuffer entries for HTT TX to avoid
	 * additional TX locking checks.
	 *
	 * For the lack of a better place do the check here.
	 */
	BUILD_BUG_ON(2 * TARGET_NUM_MSDU_DESC >
		     (CE_HTT_H2T_MSG_SRC_NENTRIES - 1));
	BUILD_BUG_ON(2 * TARGET_10_4_NUM_MSDU_DESC_PFC >
		     (CE_HTT_H2T_MSG_SRC_NENTRIES - 1));
	BUILD_BUG_ON(2 * TARGET_TLV_NUM_MSDU_DESC >
		     (CE_HTT_H2T_MSG_SRC_NENTRIES - 1));

	ce_state->ar = ar;
	ce_state->id = ce_id;
	ce_state->ctrl_addr = ath10k_ce_base_address(ar, ce_id);
	ce_state->attr_flags = attr->flags;
	ce_state->src_sz_max = attr->src_sz_max;

	if (attr->src_nentries)
		ce_state->send_cb = attr->send_cb;

	if (attr->dest_nentries)
		ce_state->recv_cb = attr->recv_cb;

	if (attr->src_nentries) {
		ce_state->src_ring =
			ce_state->ops->ce_alloc_src_ring(ar, ce_id, attr);
		if (IS_ERR(ce_state->src_ring)) {
			ret = PTR_ERR(ce_state->src_ring);
			ath10k_err(ar, "failed to alloc CE src ring %d: %d\n",
				   ce_id, ret);
			ce_state->src_ring = NULL;
			return ret;
		}
	}

	if (attr->dest_nentries) {
		ce_state->dest_ring = ce_state->ops->ce_alloc_dst_ring(ar,
								       ce_id,
								       attr);
		if (IS_ERR(ce_state->dest_ring)) {
			ret = PTR_ERR(ce_state->dest_ring);
			ath10k_err(ar, "failed to alloc CE dest ring %d: %d\n",
				   ce_id, ret);
			ce_state->dest_ring = NULL;
			return ret;
		}
	}

	return 0;
}
EXPORT_SYMBOL(ath10k_ce_alloc_pipe);

void ath10k_ce_alloc_rri(struct ath10k *ar)
{
	int i;
	u32 value;
	u32 ctrl1_regs;
	u32 ce_base_addr;
	struct ath10k_ce *ce = ath10k_ce_priv(ar);

	ce->vaddr_rri = dma_alloc_coherent(ar->dev,
					   (CE_COUNT * sizeof(u32)),
					   &ce->paddr_rri, GFP_KERNEL);

	if (!ce->vaddr_rri)
		return;

	ath10k_ce_write32(ar, ar->hw_ce_regs->ce_rri_low,
			  lower_32_bits(ce->paddr_rri));
	ath10k_ce_write32(ar, ar->hw_ce_regs->ce_rri_high,
			  (upper_32_bits(ce->paddr_rri) &
			   CE_DESC_ADDR_HI_MASK));

	for (i = 0; i < CE_COUNT; i++) {
		ctrl1_regs = ar->hw_ce_regs->ctrl1_regs->addr;
		ce_base_addr = ath10k_ce_base_address(ar, i);
		value = ath10k_ce_read32(ar, ce_base_addr + ctrl1_regs);
		value |= ar->hw_ce_regs->upd->mask;
		ath10k_ce_write32(ar, ce_base_addr + ctrl1_regs, value);
	}
}
EXPORT_SYMBOL(ath10k_ce_alloc_rri);
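
/*
 * Background note (stated as an assumption): "RRI" is the read ring
 * index. On targets with rri_on_ddr set, the hardware mirrors each
 * engine's read index into this DMA-coherent array, so hot paths such
 * as _ath10k_ce_completed_send_next_nolock() can fetch indices from
 * host memory instead of issuing register reads that require the target
 * to be awake; the ctrl1 "upd" bit written above enables that mirroring
 * per engine.
 */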

void ath10k_ce_free_rri(struct ath10k *ar)
{
	struct ath10k_ce *ce = ath10k_ce_priv(ar);

	dma_free_coherent(ar->dev, (CE_COUNT * sizeof(u32)),
			  ce->vaddr_rri,
			  ce->paddr_rri);
}
EXPORT_SYMBOL(ath10k_ce_free_rri);