// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
 * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
 * Copyright (c) 2021, Qualcomm Innovation Center, Inc. All rights reserved.
 */

#include "dp_rx.h"
#include "debug.h"
#include "hif.h"

const struct ce_attr ath11k_host_ce_config_ipq8074[] = {
	/* CE0: host->target HTC control and raw streams */
	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 16,
		.src_sz_max = 2048,
		.dest_nentries = 0,
		.send_cb = ath11k_htc_tx_completion_handler,
	},

	/* CE1: target->host HTT + HTC control */
	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 0,
		.src_sz_max = 2048,
		.dest_nentries = 512,
		.recv_cb = ath11k_htc_rx_completion_handler,
	},

	/* CE2: target->host WMI */
	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 0,
		.src_sz_max = 2048,
		.dest_nentries = 512,
		.recv_cb = ath11k_htc_rx_completion_handler,
	},

	/* CE3: host->target WMI (mac0) */
	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 32,
		.src_sz_max = 2048,
		.dest_nentries = 0,
		.send_cb = ath11k_htc_tx_completion_handler,
	},

	/* CE4: host->target HTT */
	{
		.flags = CE_ATTR_FLAGS | CE_ATTR_DIS_INTR,
		.src_nentries = 2048,
		.src_sz_max = 256,
		.dest_nentries = 0,
	},

	/* CE5: target->host pktlog */
	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 0,
		.src_sz_max = 2048,
		.dest_nentries = 512,
		.recv_cb = ath11k_dp_htt_htc_t2h_msg_handler,
	},

	/* CE6: target autonomous hif_memcpy */
	{
		.flags = CE_ATTR_FLAGS | CE_ATTR_DIS_INTR,
		.src_nentries = 0,
		.src_sz_max = 0,
		.dest_nentries = 0,
	},

	/* CE7: host->target WMI (mac1) */
	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 32,
		.src_sz_max = 2048,
		.dest_nentries = 0,
		.send_cb = ath11k_htc_tx_completion_handler,
	},

	/* CE8: target autonomous hif_memcpy */
	{
		.flags = CE_ATTR_FLAGS | CE_ATTR_DIS_INTR,
		.src_nentries = 0,
		.src_sz_max = 0,
		.dest_nentries = 0,
	},

	/* CE9: host->target WMI (mac2) */
	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 32,
		.src_sz_max = 2048,
		.dest_nentries = 0,
		.send_cb = ath11k_htc_tx_completion_handler,
	},

	/* CE10: target->host HTT */
	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 0,
		.src_sz_max = 2048,
		.dest_nentries = 512,
		.recv_cb = ath11k_htc_rx_completion_handler,
	},

	/* CE11: Not used */
	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 0,
		.src_sz_max = 0,
		.dest_nentries = 0,
	},
};

const struct ce_attr ath11k_host_ce_config_qca6390[] = {
	/* CE0: host->target HTC control and raw streams */
	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 16,
		.src_sz_max = 2048,
		.dest_nentries = 0,
	},

	/* CE1: target->host HTT + HTC control */
	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 0,
		.src_sz_max = 2048,
		.dest_nentries = 512,
		.recv_cb = ath11k_htc_rx_completion_handler,
	},

	/* CE2: target->host WMI */
	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 0,
		.src_sz_max = 2048,
		.dest_nentries = 512,
		.recv_cb = ath11k_htc_rx_completion_handler,
	},

	/* CE3: host->target WMI (mac0) */
	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 32,
		.src_sz_max = 2048,
		.dest_nentries = 0,
		.send_cb = ath11k_htc_tx_completion_handler,
	},

	/* CE4: host->target HTT */
	{
		.flags = CE_ATTR_FLAGS | CE_ATTR_DIS_INTR,
		.src_nentries = 2048,
		.src_sz_max = 256,
		.dest_nentries = 0,
	},

	/* CE5: target->host pktlog */
	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 0,
		.src_sz_max = 2048,
		.dest_nentries = 512,
		.recv_cb = ath11k_dp_htt_htc_t2h_msg_handler,
	},

	/* CE6: target autonomous hif_memcpy */
	{
		.flags = CE_ATTR_FLAGS | CE_ATTR_DIS_INTR,
		.src_nentries = 0,
		.src_sz_max = 0,
		.dest_nentries = 0,
	},

	/* CE7: host->target WMI (mac1) */
	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 32,
		.src_sz_max = 2048,
		.dest_nentries = 0,
		.send_cb = ath11k_htc_tx_completion_handler,
	},

	/* CE8: target autonomous hif_memcpy */
	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 0,
		.src_sz_max = 0,
		.dest_nentries = 0,
	},

};

const struct ce_attr ath11k_host_ce_config_qcn9074[] = {
	/* CE0: host->target HTC control and raw streams */
	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 16,
		.src_sz_max = 2048,
		.dest_nentries = 0,
	},

	/* CE1: target->host HTT + HTC control */
	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 0,
		.src_sz_max = 2048,
		.dest_nentries = 512,
		.recv_cb = ath11k_htc_rx_completion_handler,
	},

	/* CE2: target->host WMI */
	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 0,
		.src_sz_max = 2048,
		.dest_nentries = 32,
		.recv_cb = ath11k_htc_rx_completion_handler,
	},

	/* CE3: host->target WMI (mac0) */
	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 32,
		.src_sz_max = 2048,
		.dest_nentries = 0,
		.send_cb = ath11k_htc_tx_completion_handler,
	},

	/* CE4: host->target HTT */
	{
		.flags = CE_ATTR_FLAGS | CE_ATTR_DIS_INTR,
		.src_nentries = 2048,
		.src_sz_max = 256,
		.dest_nentries = 0,
	},

	/* CE5: target->host pktlog */
	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 0,
		.src_sz_max = 2048,
		.dest_nentries = 512,
		.recv_cb = ath11k_dp_htt_htc_t2h_msg_handler,
	},
};

static bool ath11k_ce_need_shadow_fix(int ce_id)
{
	/* only ce4 needs shadow workaround */
	if (ce_id == 4)
		return true;
	return false;
}

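/* Stop the shadow register update timers (ce.hp_timer) for the CE pipes
 * that use the shadow workaround; a no-op when shadow registers are not
 * supported by the hardware.
 */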
void ath11k_ce_stop_shadow_timers(struct ath11k_base *ab)
{
	int i;

	if (!ab->hw_params.supports_shadow_regs)
		return;

	for (i = 0; i < ab->hw_params.ce_count; i++)
		if (ath11k_ce_need_shadow_fix(i))
			ath11k_dp_shadow_stop_timer(ab, &ab->ce.hp_timer[i]);
}

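/* Post a single rx buffer to the destination SRNG of a CE pipe.
 * Caller must hold ab->ce.ce_lock.
 */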
static int ath11k_ce_rx_buf_enqueue_pipe(struct ath11k_ce_pipe *pipe,
					  struct sk_buff *skb, dma_addr_t paddr)
{
	struct ath11k_base *ab = pipe->ab;
	struct ath11k_ce_ring *ring = pipe->dest_ring;
	struct hal_srng *srng;
	unsigned int write_index;
	unsigned int nentries_mask = ring->nentries_mask;
	u32 *desc;
	int ret;

	lockdep_assert_held(&ab->ce.ce_lock);

	write_index = ring->write_index;

	srng = &ab->hal.srng_list[ring->hal_ring_id];

	spin_lock_bh(&srng->lock);

	ath11k_hal_srng_access_begin(ab, srng);

	if (unlikely(ath11k_hal_srng_src_num_free(ab, srng, false) < 1)) {
		ret = -ENOSPC;
		goto exit;
	}

	desc = ath11k_hal_srng_src_get_next_entry(ab, srng);
	if (!desc) {
		ret = -ENOSPC;
		goto exit;
	}

	ath11k_hal_ce_dst_set_desc(desc, paddr);

	ring->skb[write_index] = skb;
	write_index = CE_RING_IDX_INCR(nentries_mask, write_index);
	ring->write_index = write_index;

	pipe->rx_buf_needed--;

	ret = 0;
exit:
	ath11k_hal_srng_access_end(ab, srng);

	spin_unlock_bh(&srng->lock);

	return ret;
}

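/* Refill the destination ring of a CE pipe: allocate, DMA-map and enqueue
 * rx buffers until pipe->rx_buf_needed is satisfied or an error occurs.
 */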
static int ath11k_ce_rx_post_pipe(struct ath11k_ce_pipe *pipe)
{
	struct ath11k_base *ab = pipe->ab;
	struct sk_buff *skb;
	dma_addr_t paddr;
	int ret = 0;

	if (!(pipe->dest_ring || pipe->status_ring))
		return 0;

	spin_lock_bh(&ab->ce.ce_lock);
	while (pipe->rx_buf_needed) {
		skb = dev_alloc_skb(pipe->buf_sz);
		if (!skb) {
			ret = -ENOMEM;
			goto exit;
		}

		WARN_ON_ONCE(!IS_ALIGNED((unsigned long)skb->data, 4));

		paddr = dma_map_single(ab->dev, skb->data,
				       skb->len + skb_tailroom(skb),
				       DMA_FROM_DEVICE);
		if (unlikely(dma_mapping_error(ab->dev, paddr))) {
			ath11k_warn(ab, "failed to dma map ce rx buf\n");
			dev_kfree_skb_any(skb);
			ret = -EIO;
			goto exit;
		}

		ATH11K_SKB_RXCB(skb)->paddr = paddr;

		ret = ath11k_ce_rx_buf_enqueue_pipe(pipe, skb, paddr);

		if (ret) {
			ath11k_warn(ab, "failed to enqueue rx buf: %d\n", ret);
			dma_unmap_single(ab->dev, paddr,
					 skb->len + skb_tailroom(skb),
					 DMA_FROM_DEVICE);
			dev_kfree_skb_any(skb);
			goto exit;
		}
	}

exit:
	spin_unlock_bh(&ab->ce.ce_lock);
	return ret;
}

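/* Reap the next completed receive from the destination status ring and
 * return the corresponding skb and the number of bytes received.
 */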
static int ath11k_ce_completed_recv_next(struct ath11k_ce_pipe *pipe,
					  struct sk_buff **skb, int *nbytes)
{
	struct ath11k_base *ab = pipe->ab;
	struct hal_srng *srng;
	unsigned int sw_index;
	unsigned int nentries_mask;
	u32 *desc;
	int ret = 0;

	spin_lock_bh(&ab->ce.ce_lock);

	sw_index = pipe->dest_ring->sw_index;
	nentries_mask = pipe->dest_ring->nentries_mask;

	srng = &ab->hal.srng_list[pipe->status_ring->hal_ring_id];

	spin_lock_bh(&srng->lock);

	ath11k_hal_srng_access_begin(ab, srng);

	desc = ath11k_hal_srng_dst_get_next_entry(ab, srng);
	if (!desc) {
		ret = -EIO;
		goto err;
	}

	*nbytes = ath11k_hal_ce_dst_status_get_length(desc);
	if (*nbytes == 0) {
		ret = -EIO;
		goto err;
	}

	*skb = pipe->dest_ring->skb[sw_index];
	pipe->dest_ring->skb[sw_index] = NULL;

	sw_index = CE_RING_IDX_INCR(nentries_mask, sw_index);
	pipe->dest_ring->sw_index = sw_index;

	pipe->rx_buf_needed++;
err:
	ath11k_hal_srng_access_end(ab, srng);

	spin_unlock_bh(&srng->lock);

	spin_unlock_bh(&ab->ce.ce_lock);

	return ret;
}

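/* Reap all completed rx buffers on a pipe, hand them to the registered
 * recv_cb and replenish the destination ring afterwards.
 */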
static void ath11k_ce_recv_process_cb(struct ath11k_ce_pipe *pipe)
{
	struct ath11k_base *ab = pipe->ab;
	struct sk_buff *skb;
	struct sk_buff_head list;
	unsigned int nbytes, max_nbytes;
	int ret;

	__skb_queue_head_init(&list);
	while (ath11k_ce_completed_recv_next(pipe, &skb, &nbytes) == 0) {
		max_nbytes = skb->len + skb_tailroom(skb);
		dma_unmap_single(ab->dev, ATH11K_SKB_RXCB(skb)->paddr,
				 max_nbytes, DMA_FROM_DEVICE);

		if (unlikely(max_nbytes < nbytes)) {
			ath11k_warn(ab, "rxed more than expected (nbytes %d, max %d)",
				    nbytes, max_nbytes);
			dev_kfree_skb_any(skb);
			continue;
		}

		skb_put(skb, nbytes);
		__skb_queue_tail(&list, skb);
	}

	while ((skb = __skb_dequeue(&list))) {
		ath11k_dbg(ab, ATH11K_DBG_CE, "rx ce pipe %d len %d\n",
			   pipe->pipe_num, skb->len);
		pipe->recv_cb(ab, skb);
	}

	ret = ath11k_ce_rx_post_pipe(pipe);
	if (ret && ret != -ENOSPC) {
		ath11k_warn(ab, "failed to post rx buf to pipe: %d err: %d\n",
			    pipe->pipe_num, ret);
		mod_timer(&ab->rx_replenish_retry,
			  jiffies + ATH11K_CE_RX_POST_RETRY_JIFFIES);
	}
}

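/* Reap the next completed tx descriptor from the source ring and return the
 * transmitted skb, or an ERR_PTR() when nothing is ready.
 */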
static struct sk_buff *ath11k_ce_completed_send_next(struct ath11k_ce_pipe *pipe)
{
	struct ath11k_base *ab = pipe->ab;
	struct hal_srng *srng;
	unsigned int sw_index;
	unsigned int nentries_mask;
	struct sk_buff *skb;
	u32 *desc;

	spin_lock_bh(&ab->ce.ce_lock);

	sw_index = pipe->src_ring->sw_index;
	nentries_mask = pipe->src_ring->nentries_mask;

	srng = &ab->hal.srng_list[pipe->src_ring->hal_ring_id];

	spin_lock_bh(&srng->lock);

	ath11k_hal_srng_access_begin(ab, srng);

	desc = ath11k_hal_srng_src_reap_next(ab, srng);
	if (!desc) {
		skb = ERR_PTR(-EIO);
		goto err_unlock;
	}

	skb = pipe->src_ring->skb[sw_index];

	pipe->src_ring->skb[sw_index] = NULL;

	sw_index = CE_RING_IDX_INCR(nentries_mask, sw_index);
	pipe->src_ring->sw_index = sw_index;

err_unlock:
	spin_unlock_bh(&srng->lock);

	spin_unlock_bh(&ab->ce.ce_lock);

	return skb;
}

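/* Process tx completions on a pipe: unmap each completed skb and either pass
 * it to the registered send_cb or free it when no callback is set or credit
 * flow is enabled.
 */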
static void ath11k_ce_tx_process_cb(struct ath11k_ce_pipe *pipe)
{
	struct ath11k_base *ab = pipe->ab;
	struct sk_buff *skb;
	struct sk_buff_head list;

	__skb_queue_head_init(&list);
	while (!IS_ERR(skb = ath11k_ce_completed_send_next(pipe))) {
		if (!skb)
			continue;

		dma_unmap_single(ab->dev, ATH11K_SKB_CB(skb)->paddr, skb->len,
				 DMA_TO_DEVICE);

		if ((!pipe->send_cb) || ab->hw_params.credit_flow) {
			dev_kfree_skb_any(skb);
			continue;
		}

		__skb_queue_tail(&list, skb);
	}

	while ((skb = __skb_dequeue(&list))) {
		ath11k_dbg(ab, ATH11K_DBG_CE, "tx ce pipe %d len %d\n",
			   pipe->pipe_num, skb->len);
		pipe->send_cb(ab, skb);
	}
}

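/* Fill in the MSI address and data for a CE SRNG so the ring can raise MSI
 * interrupts; returns without changes if no MSI vector is assigned for the
 * copy engines.
 */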
static void ath11k_ce_srng_msi_ring_params_setup(struct ath11k_base *ab, u32 ce_id,
						  struct hal_srng_params *ring_params)
{
	u32 msi_data_start;
	u32 msi_data_count, msi_data_idx;
	u32 msi_irq_start;
	u32 addr_lo;
	u32 addr_hi;
	int ret;

	ret = ath11k_get_user_msi_vector(ab, "CE",
					 &msi_data_count, &msi_data_start,
					 &msi_irq_start);

	if (ret)
		return;

	ath11k_get_msi_address(ab, &addr_lo, &addr_hi);
	ath11k_get_ce_msi_idx(ab, ce_id, &msi_data_idx);

	ring_params->msi_addr = addr_lo;
	ring_params->msi_addr |= (dma_addr_t)(((uint64_t)addr_hi) << 32);
	ring_params->msi_data = (msi_data_idx % msi_data_count) + msi_data_start;
	ring_params->flags |= HAL_SRNG_FLAGS_MSI_INTR;
}

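/* Set up the HAL SRNG backing a CE ring (source, destination or destination
 * status) and, for rings that need the workaround, initialise the shadow
 * register update timer.
 */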
static int ath11k_ce_init_ring(struct ath11k_base *ab,
			       struct ath11k_ce_ring *ce_ring,
			       int ce_id, enum hal_ring_type type)
{
	struct hal_srng_params params = { 0 };
	int ret;

	params.ring_base_paddr = ce_ring->base_addr_ce_space;
	params.ring_base_vaddr = ce_ring->base_addr_owner_space;
	params.num_entries = ce_ring->nentries;

	if (!(CE_ATTR_DIS_INTR & ab->hw_params.host_ce_config[ce_id].flags))
		ath11k_ce_srng_msi_ring_params_setup(ab, ce_id, &params);

	switch (type) {
	case HAL_CE_SRC:
		if (!(CE_ATTR_DIS_INTR & ab->hw_params.host_ce_config[ce_id].flags))
			params.intr_batch_cntr_thres_entries = 1;
		break;
	case HAL_CE_DST:
		params.max_buffer_len = ab->hw_params.host_ce_config[ce_id].src_sz_max;
		if (!(ab->hw_params.host_ce_config[ce_id].flags & CE_ATTR_DIS_INTR)) {
			params.intr_timer_thres_us = 1024;
			params.flags |= HAL_SRNG_FLAGS_LOW_THRESH_INTR_EN;
			params.low_threshold = ce_ring->nentries - 3;
		}
		break;
	case HAL_CE_DST_STATUS:
		if (!(ab->hw_params.host_ce_config[ce_id].flags & CE_ATTR_DIS_INTR)) {
			params.intr_batch_cntr_thres_entries = 1;
			params.intr_timer_thres_us = 0x1000;
		}
		break;
	default:
		ath11k_warn(ab, "Invalid CE ring type %d\n", type);
		return -EINVAL;
	}

	/* TODO: Init other params needed by HAL to init the ring */

	ret = ath11k_hal_srng_setup(ab, type, ce_id, 0, &params);
	if (ret < 0) {
		ath11k_warn(ab, "failed to setup srng: %d ring_id %d\n",
			    ret, ce_id);
		return ret;
	}

	ce_ring->hal_ring_id = ret;

	if (ab->hw_params.supports_shadow_regs &&
	    ath11k_ce_need_shadow_fix(ce_id))
		ath11k_dp_shadow_init_timer(ab, &ab->ce.hp_timer[ce_id],
					    ATH11K_SHADOW_CTRL_TIMER_INTERVAL,
					    ce_ring->hal_ring_id);

	return 0;
}

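/* Allocate a CE ring: the ring bookkeeping structure plus a coherent DMA
 * descriptor area aligned to CE_DESC_RING_ALIGN.
 */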
static struct ath11k_ce_ring *
ath11k_ce_alloc_ring(struct ath11k_base *ab, int nentries, int desc_sz)
{
	struct ath11k_ce_ring *ce_ring;
	dma_addr_t base_addr;

	ce_ring = kzalloc(struct_size(ce_ring, skb, nentries), GFP_KERNEL);
	if (ce_ring == NULL)
		return ERR_PTR(-ENOMEM);

	ce_ring->nentries = nentries;
	ce_ring->nentries_mask = nentries - 1;

	/* Legacy platforms that do not support cache
	 * coherent DMA are unsupported
	 */
	ce_ring->base_addr_owner_space_unaligned =
		dma_alloc_coherent(ab->dev,
				   nentries * desc_sz + CE_DESC_RING_ALIGN,
				   &base_addr, GFP_KERNEL);
	if (!ce_ring->base_addr_owner_space_unaligned) {
		kfree(ce_ring);
		return ERR_PTR(-ENOMEM);
	}

	ce_ring->base_addr_ce_space_unaligned = base_addr;

	ce_ring->base_addr_owner_space = PTR_ALIGN(
			ce_ring->base_addr_owner_space_unaligned,
			CE_DESC_RING_ALIGN);
	ce_ring->base_addr_ce_space = ALIGN(
			ce_ring->base_addr_ce_space_unaligned,
			CE_DESC_RING_ALIGN);

	return ce_ring;
}

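/* Allocate the source, destination and destination-status rings for one CE
 * pipe according to its host ce_attr.
 */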
static int ath11k_ce_alloc_pipe(struct ath11k_base *ab, int ce_id)
{
	struct ath11k_ce_pipe *pipe = &ab->ce.ce_pipe[ce_id];
	const struct ce_attr *attr = &ab->hw_params.host_ce_config[ce_id];
	struct ath11k_ce_ring *ring;
	int nentries;
	int desc_sz;

	pipe->attr_flags = attr->flags;

	if (attr->src_nentries) {
		pipe->send_cb = attr->send_cb;
		nentries = roundup_pow_of_two(attr->src_nentries);
		desc_sz = ath11k_hal_ce_get_desc_size(HAL_CE_DESC_SRC);
		ring = ath11k_ce_alloc_ring(ab, nentries, desc_sz);
		if (IS_ERR(ring))
			return PTR_ERR(ring);
		pipe->src_ring = ring;
	}

	if (attr->dest_nentries) {
		pipe->recv_cb = attr->recv_cb;
		nentries = roundup_pow_of_two(attr->dest_nentries);
		desc_sz = ath11k_hal_ce_get_desc_size(HAL_CE_DESC_DST);
		ring = ath11k_ce_alloc_ring(ab, nentries, desc_sz);
		if (IS_ERR(ring))
			return PTR_ERR(ring);
		pipe->dest_ring = ring;

		desc_sz = ath11k_hal_ce_get_desc_size(HAL_CE_DESC_DST_STATUS);
		ring = ath11k_ce_alloc_ring(ab, nentries, desc_sz);
		if (IS_ERR(ring))
			return PTR_ERR(ring);
		pipe->status_ring = ring;
	}

	return 0;
}

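/* Service one copy engine: reap tx completions on its source ring and process
 * any received buffers on its destination ring.
 */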
void ath11k_ce_per_engine_service(struct ath11k_base *ab, u16 ce_id)
{
	struct ath11k_ce_pipe *pipe = &ab->ce.ce_pipe[ce_id];
	const struct ce_attr *attr = &ab->hw_params.host_ce_config[ce_id];

	if (attr->src_nentries)
		ath11k_ce_tx_process_cb(pipe);

	if (pipe->recv_cb)
		ath11k_ce_recv_process_cb(pipe);
}

void ath11k_ce_poll_send_completed(struct ath11k_base *ab, u8 pipe_id)
{
	struct ath11k_ce_pipe *pipe = &ab->ce.ce_pipe[pipe_id];
	const struct ce_attr *attr = &ab->hw_params.host_ce_config[pipe_id];

	if ((pipe->attr_flags & CE_ATTR_DIS_INTR) && attr->src_nentries)
		ath11k_ce_tx_process_cb(pipe);
}
EXPORT_SYMBOL(ath11k_ce_per_engine_service);

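/* Submit an skb to a CE source ring for transmission. For pipes with
 * interrupts disabled, tx completions are polled first once ring usage
 * exceeds ATH11K_CE_USAGE_THRESHOLD.
 */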
int ath11k_ce_send(struct ath11k_base *ab, struct sk_buff *skb, u8 pipe_id,
		   u16 transfer_id)
{
	struct ath11k_ce_pipe *pipe = &ab->ce.ce_pipe[pipe_id];
	struct hal_srng *srng;
	u32 *desc;
	unsigned int write_index, sw_index;
	unsigned int nentries_mask;
	int ret = 0;
	u8 byte_swap_data = 0;
	int num_used;

	/* Check if some entries could be regained by handling tx completion if
	 * the CE has interrupts disabled and the used entries is more than the
	 * defined usage threshold.
	 */
	if (pipe->attr_flags & CE_ATTR_DIS_INTR) {
		spin_lock_bh(&ab->ce.ce_lock);
		write_index = pipe->src_ring->write_index;

		sw_index = pipe->src_ring->sw_index;

		if (write_index >= sw_index)
			num_used = write_index - sw_index;
		else
			num_used = pipe->src_ring->nentries - sw_index +
				   write_index;

		spin_unlock_bh(&ab->ce.ce_lock);

		if (num_used > ATH11K_CE_USAGE_THRESHOLD)
			ath11k_ce_poll_send_completed(ab, pipe->pipe_num);
	}

	if (test_bit(ATH11K_FLAG_CRASH_FLUSH, &ab->dev_flags))
		return -ESHUTDOWN;

	spin_lock_bh(&ab->ce.ce_lock);

	write_index = pipe->src_ring->write_index;
	nentries_mask = pipe->src_ring->nentries_mask;

	srng = &ab->hal.srng_list[pipe->src_ring->hal_ring_id];

	spin_lock_bh(&srng->lock);

	ath11k_hal_srng_access_begin(ab, srng);

	if (unlikely(ath11k_hal_srng_src_num_free(ab, srng, false) < 1)) {
		ath11k_hal_srng_access_end(ab, srng);
		ret = -ENOBUFS;
		goto err_unlock;
	}

	desc = ath11k_hal_srng_src_get_next_reaped(ab, srng);
	if (!desc) {
		ath11k_hal_srng_access_end(ab, srng);
		ret = -ENOBUFS;
		goto err_unlock;
	}

	if (pipe->attr_flags & CE_ATTR_BYTE_SWAP_DATA)
		byte_swap_data = 1;

	ath11k_hal_ce_src_set_desc(desc, ATH11K_SKB_CB(skb)->paddr,
				   skb->len, transfer_id, byte_swap_data);

	pipe->src_ring->skb[write_index] = skb;
	pipe->src_ring->write_index = CE_RING_IDX_INCR(nentries_mask,
						       write_index);

	ath11k_hal_srng_access_end(ab, srng);

	if (ath11k_ce_need_shadow_fix(pipe_id))
		ath11k_dp_shadow_start_timer(ab, srng, &ab->ce.hp_timer[pipe_id]);

	spin_unlock_bh(&srng->lock);

	spin_unlock_bh(&ab->ce.ce_lock);

	return 0;

err_unlock:
	spin_unlock_bh(&srng->lock);

	spin_unlock_bh(&ab->ce.ce_lock);

	return ret;
}

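/* Unmap and free any rx buffers still posted on a pipe's destination ring,
 * typically during teardown.
 */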
static void ath11k_ce_rx_pipe_cleanup(struct ath11k_ce_pipe *pipe)
{
	struct ath11k_base *ab = pipe->ab;
	struct ath11k_ce_ring *ring = pipe->dest_ring;
	struct sk_buff *skb;
	int i;

	if (!(ring && pipe->buf_sz))
		return;

	for (i = 0; i < ring->nentries; i++) {
		skb = ring->skb[i];
		if (!skb)
			continue;

		ring->skb[i] = NULL;
		dma_unmap_single(ab->dev, ATH11K_SKB_RXCB(skb)->paddr,
				 skb->len + skb_tailroom(skb), DMA_FROM_DEVICE);
		dev_kfree_skb_any(skb);
	}
}

static void ath11k_ce_shadow_config(struct ath11k_base *ab)
{
	int i;

	for (i = 0; i < ab->hw_params.ce_count; i++) {
		if (ab->hw_params.host_ce_config[i].src_nentries)
			ath11k_hal_srng_update_shadow_config(ab,
							     HAL_CE_SRC, i);

		if (ab->hw_params.host_ce_config[i].dest_nentries) {
			ath11k_hal_srng_update_shadow_config(ab,
							     HAL_CE_DST, i);

			ath11k_hal_srng_update_shadow_config(ab,
							     HAL_CE_DST_STATUS, i);
		}
	}
}

void ath11k_ce_get_shadow_config(struct ath11k_base *ab,
				 u32 **shadow_cfg, u32 *shadow_cfg_len)
{
	if (!ab->hw_params.supports_shadow_regs)
		return;

	ath11k_hal_srng_get_shadow_config(ab, shadow_cfg, shadow_cfg_len);

	/* shadow is already configured */
	if (*shadow_cfg_len)
		return;

	/* shadow isn't configured yet, configure now.
	 * non-CE srngs are configured first, then
	 * all CE srngs.
	 */
	ath11k_hal_srng_shadow_config(ab);
	ath11k_ce_shadow_config(ab);

	/* get the shadow configuration */
	ath11k_hal_srng_get_shadow_config(ab, shadow_cfg, shadow_cfg_len);
}
EXPORT_SYMBOL(ath11k_ce_get_shadow_config);

void ath11k_ce_cleanup_pipes(struct ath11k_base *ab)
{
	struct ath11k_ce_pipe *pipe;
	int pipe_num;

	ath11k_ce_stop_shadow_timers(ab);

	for (pipe_num = 0; pipe_num < ab->hw_params.ce_count; pipe_num++) {
		pipe = &ab->ce.ce_pipe[pipe_num];
		ath11k_ce_rx_pipe_cleanup(pipe);

		/* Cleanup any src CE's which have interrupts disabled */
		ath11k_ce_poll_send_completed(ab, pipe_num);

		/* NOTE: Should we also clean up tx buffer in all pipes? */
	}
}
EXPORT_SYMBOL(ath11k_ce_cleanup_pipes);

void ath11k_ce_rx_post_buf(struct ath11k_base *ab)
{
	struct ath11k_ce_pipe *pipe;
	int i;
	int ret;

	for (i = 0; i < ab->hw_params.ce_count; i++) {
		pipe = &ab->ce.ce_pipe[i];
		ret = ath11k_ce_rx_post_pipe(pipe);
		if (ret) {
			if (ret == -ENOSPC)
				continue;

			ath11k_warn(ab, "failed to post rx buf to pipe: %d err: %d\n",
				    i, ret);
			mod_timer(&ab->rx_replenish_retry,
				  jiffies + ATH11K_CE_RX_POST_RETRY_JIFFIES);

			return;
		}
	}
}
EXPORT_SYMBOL(ath11k_ce_rx_post_buf);

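/* Timer callback used to retry posting rx buffers after an earlier refill
 * attempt failed.
 */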
void ath11k_ce_rx_replenish_retry(struct timer_list *t)
{
	struct ath11k_base *ab = from_timer(ab, t, rx_replenish_retry);

	ath11k_ce_rx_post_buf(ab);
}

int ath11k_ce_init_pipes(struct ath11k_base *ab)
{
	struct ath11k_ce_pipe *pipe;
	int i;
	int ret;

	for (i = 0; i < ab->hw_params.ce_count; i++) {
		pipe = &ab->ce.ce_pipe[i];

		if (pipe->src_ring) {
			ret = ath11k_ce_init_ring(ab, pipe->src_ring, i,
						  HAL_CE_SRC);
			if (ret) {
				ath11k_warn(ab, "failed to init src ring: %d\n",
					    ret);
				/* Should we clear any partial init */
				return ret;
			}

			pipe->src_ring->write_index = 0;
			pipe->src_ring->sw_index = 0;
		}

		if (pipe->dest_ring) {
			ret = ath11k_ce_init_ring(ab, pipe->dest_ring, i,
						  HAL_CE_DST);
			if (ret) {
				ath11k_warn(ab, "failed to init dest ring: %d\n",
					    ret);
				/* Should we clear any partial init */
				return ret;
			}

			pipe->rx_buf_needed = pipe->dest_ring->nentries ?
					      pipe->dest_ring->nentries - 2 : 0;

			pipe->dest_ring->write_index = 0;
			pipe->dest_ring->sw_index = 0;
		}

		if (pipe->status_ring) {
			ret = ath11k_ce_init_ring(ab, pipe->status_ring, i,
						  HAL_CE_DST_STATUS);
			if (ret) {
				ath11k_warn(ab, "failed to init dest status ring: %d\n",
					    ret);
				/* Should we clear any partial init */
				return ret;
			}

			pipe->status_ring->write_index = 0;
			pipe->status_ring->sw_index = 0;
		}
	}

	return 0;
}

void ath11k_ce_free_pipes(struct ath11k_base *ab)
{
	struct ath11k_ce_pipe *pipe;
	struct ath11k_ce_ring *ce_ring;
	int desc_sz;
	int i;

	for (i = 0; i < ab->hw_params.ce_count; i++) {
		pipe = &ab->ce.ce_pipe[i];

		if (ath11k_ce_need_shadow_fix(i))
			ath11k_dp_shadow_stop_timer(ab, &ab->ce.hp_timer[i]);

		if (pipe->src_ring) {
			desc_sz = ath11k_hal_ce_get_desc_size(HAL_CE_DESC_SRC);
			ce_ring = pipe->src_ring;
			dma_free_coherent(ab->dev,
					  pipe->src_ring->nentries * desc_sz +
					  CE_DESC_RING_ALIGN,
					  ce_ring->base_addr_owner_space_unaligned,
					  ce_ring->base_addr_ce_space_unaligned);
			kfree(pipe->src_ring);
			pipe->src_ring = NULL;
		}

		if (pipe->dest_ring) {
			desc_sz = ath11k_hal_ce_get_desc_size(HAL_CE_DESC_DST);
			ce_ring = pipe->dest_ring;
			dma_free_coherent(ab->dev,
					  pipe->dest_ring->nentries * desc_sz +
					  CE_DESC_RING_ALIGN,
					  ce_ring->base_addr_owner_space_unaligned,
					  ce_ring->base_addr_ce_space_unaligned);
			kfree(pipe->dest_ring);
			pipe->dest_ring = NULL;
		}

		if (pipe->status_ring) {
			desc_sz =
				ath11k_hal_ce_get_desc_size(HAL_CE_DESC_DST_STATUS);
			ce_ring = pipe->status_ring;
			dma_free_coherent(ab->dev,
					  pipe->status_ring->nentries * desc_sz +
					  CE_DESC_RING_ALIGN,
					  ce_ring->base_addr_owner_space_unaligned,
					  ce_ring->base_addr_ce_space_unaligned);
			kfree(pipe->status_ring);
			pipe->status_ring = NULL;
		}
	}
}
EXPORT_SYMBOL(ath11k_ce_free_pipes);

int ath11k_ce_alloc_pipes(struct ath11k_base *ab)
{
	struct ath11k_ce_pipe *pipe;
	int i;
	int ret;
	const struct ce_attr *attr;

	spin_lock_init(&ab->ce.ce_lock);

	for (i = 0; i < ab->hw_params.ce_count; i++) {
		attr = &ab->hw_params.host_ce_config[i];
		pipe = &ab->ce.ce_pipe[i];
		pipe->pipe_num = i;
		pipe->ab = ab;
		pipe->buf_sz = attr->src_sz_max;

		ret = ath11k_ce_alloc_pipe(ab, i);
		if (ret) {
			/* Free any partially successful allocation */
			ath11k_ce_free_pipes(ab);
			return ret;
		}
	}

	return 0;
}
EXPORT_SYMBOL(ath11k_ce_alloc_pipes);

/* For Big Endian Host, Copy Engine byte_swap is enabled
 * When Copy Engine does byte_swap, need to byte swap again for the
 * Host to get/put buffer content in the correct byte order
 */
void ath11k_ce_byte_swap(void *mem, u32 len)
{
	int i;
#if defined(__FreeBSD__)
	u32 *m = mem;
#endif

	if (IS_ENABLED(CONFIG_CPU_BIG_ENDIAN)) {
		if (!mem)
			return;

		for (i = 0; i < (len / 4); i++) {
#if defined(__linux__)
			*(u32 *)mem = swab32(*(u32 *)mem);
			mem += 4;
#elif defined(__FreeBSD__)
			*m = swab32(*m);
			m++;
#endif
		}
	}
}

int ath11k_ce_get_attr_flags(struct ath11k_base *ab, int ce_id)
{
	if (ce_id >= ab->hw_params.ce_count)
		return -EINVAL;

	return ab->hw_params.host_ce_config[ce_id].flags;
}
EXPORT_SYMBOL(ath11k_ce_get_attr_flags);