1 // SPDX-License-Identifier: ISC
2 /*
3 * Copyright (c) 2005-2011 Atheros Communications Inc.
4 * Copyright (c) 2011-2017 Qualcomm Atheros, Inc.
5 * Copyright (c) 2018 The Linux Foundation. All rights reserved.
6 */
7
8 #include "hif.h"
9 #include "ce.h"
10 #include "debug.h"
11
12 /*
13 * Support for Copy Engine hardware, which is mainly used for
14 * communication between Host and Target over a PCIe interconnect.
15 */
16
17 /*
18 * A single CopyEngine (CE) comprises two "rings":
19 * a source ring
20 * a destination ring
21 *
22 * Each ring consists of a number of descriptors which specify
23 * an address, length, and meta-data.
24 *
25 * Typically, one side of the PCIe/AHB/SNOC interconnect (Host or Target)
26 * controls one ring and the other side controls the other ring.
27 * The source side chooses when to initiate a transfer and it
28 * chooses what to send (buffer address, length). The destination
29 * side keeps a supply of "anonymous receive buffers" available and
30 * it handles incoming data as it arrives (when the destination
31 * receives an interrupt).
32 *
33 * The sender may send a simple buffer (address/length) or it may
34 * send a small list of buffers. When a small list is sent, hardware
35 * "gathers" these and they end up in a single destination buffer
36 * with a single interrupt.
37 *
 * There are several "contexts" managed by this layer -- more, it
 * may seem, than should be needed. These are provided mainly for
40 * maximum flexibility and especially to facilitate a simpler HIF
41 * implementation. There are per-CopyEngine recv, send, and watermark
42 * contexts. These are supplied by the caller when a recv, send,
43 * or watermark handler is established and they are echoed back to
44 * the caller when the respective callbacks are invoked. There is
45 * also a per-transfer context supplied by the caller when a buffer
46 * (or sendlist) is sent and when a buffer is enqueued for recv.
47 * These per-transfer contexts are echoed back to the caller when
48 * the buffer is sent/received.
49 */
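/*
 * Illustrative sketch (not part of the driver, guarded out of the build):
 * how a hypothetical caller might hand a DMA-mapped buffer to a copy
 * engine together with a per-transfer context that is echoed back on
 * send completion. The pipe pointer, buffer type and transfer id below
 * are assumptions for the example only.
 */
#if 0
static int example_ce_send(struct ath10k *ar, struct ath10k_ce_pipe *pipe,
			   struct sk_buff *skb, unsigned int transfer_id)
{
	dma_addr_t paddr;

	paddr = dma_map_single(ar->dev, skb->data, skb->len, DMA_TO_DEVICE);
	if (dma_mapping_error(ar->dev, paddr))
		return -EIO;

	/* skb is the per-transfer context; it is handed back to the caller
	 * when the send completes (see ath10k_ce_completed_send_next()).
	 */
	return ath10k_ce_send(pipe, skb, paddr, skb->len, transfer_id, 0);
}
#endif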
50
static inline u32 shadow_sr_wr_ind_addr(struct ath10k *ar,
52 struct ath10k_ce_pipe *ce_state)
53 {
54 u32 ce_id = ce_state->id;
55 u32 addr = 0;
56
57 switch (ce_id) {
58 case 0:
59 addr = 0x00032000;
60 break;
61 case 3:
62 addr = 0x0003200C;
63 break;
64 case 4:
65 addr = 0x00032010;
66 break;
67 case 5:
68 addr = 0x00032014;
69 break;
70 case 7:
71 addr = 0x0003201C;
72 break;
73 default:
74 ath10k_warn(ar, "invalid CE id: %d", ce_id);
75 break;
76 }
77 return addr;
78 }
79
80 static inline unsigned int
ath10k_set_ring_byte(unsigned int offset,
82 struct ath10k_hw_ce_regs_addr_map *addr_map)
83 {
84 return ((offset << addr_map->lsb) & addr_map->mask);
85 }
86
static inline u32 ath10k_ce_read32(struct ath10k *ar, u32 offset)
88 {
89 struct ath10k_ce *ce = ath10k_ce_priv(ar);
90
91 return ce->bus_ops->read32(ar, offset);
92 }
93
static inline void ath10k_ce_write32(struct ath10k *ar, u32 offset, u32 value)
95 {
96 struct ath10k_ce *ce = ath10k_ce_priv(ar);
97
98 ce->bus_ops->write32(ar, offset, value);
99 }
100
static inline void ath10k_ce_dest_ring_write_index_set(struct ath10k *ar,
102 u32 ce_ctrl_addr,
103 unsigned int n)
104 {
105 ath10k_ce_write32(ar, ce_ctrl_addr +
106 ar->hw_ce_regs->dst_wr_index_addr, n);
107 }
108
static inline u32 ath10k_ce_dest_ring_write_index_get(struct ath10k *ar,
110 u32 ce_ctrl_addr)
111 {
112 return ath10k_ce_read32(ar, ce_ctrl_addr +
113 ar->hw_ce_regs->dst_wr_index_addr);
114 }
115
static inline void ath10k_ce_src_ring_write_index_set(struct ath10k *ar,
117 u32 ce_ctrl_addr,
118 unsigned int n)
119 {
120 ath10k_ce_write32(ar, ce_ctrl_addr +
121 ar->hw_ce_regs->sr_wr_index_addr, n);
122 }
123
static inline u32 ath10k_ce_src_ring_write_index_get(struct ath10k *ar,
125 u32 ce_ctrl_addr)
126 {
127 return ath10k_ce_read32(ar, ce_ctrl_addr +
128 ar->hw_ce_regs->sr_wr_index_addr);
129 }
130
static inline u32 ath10k_ce_src_ring_read_index_from_ddr(struct ath10k *ar,
132 u32 ce_id)
133 {
134 struct ath10k_ce *ce = ath10k_ce_priv(ar);
135
136 return ce->vaddr_rri[ce_id] & CE_DDR_RRI_MASK;
137 }
138
static inline u32 ath10k_ce_src_ring_read_index_get(struct ath10k *ar,
140 u32 ce_ctrl_addr)
141 {
142 struct ath10k_ce *ce = ath10k_ce_priv(ar);
143 u32 ce_id = COPY_ENGINE_ID(ce_ctrl_addr);
144 struct ath10k_ce_pipe *ce_state = &ce->ce_states[ce_id];
145 u32 index;
146
147 if (ar->hw_params.rri_on_ddr &&
148 (ce_state->attr_flags & CE_ATTR_DIS_INTR))
149 index = ath10k_ce_src_ring_read_index_from_ddr(ar, ce_id);
150 else
151 index = ath10k_ce_read32(ar, ce_ctrl_addr +
152 ar->hw_ce_regs->current_srri_addr);
153
154 return index;
155 }
156
157 static inline void
ath10k_ce_shadow_src_ring_write_index_set(struct ath10k *ar,
159 struct ath10k_ce_pipe *ce_state,
160 unsigned int value)
161 {
162 ath10k_ce_write32(ar, shadow_sr_wr_ind_addr(ar, ce_state), value);
163 }
164
static inline void ath10k_ce_src_ring_base_addr_set(struct ath10k *ar,
166 u32 ce_id,
167 u64 addr)
168 {
169 struct ath10k_ce *ce = ath10k_ce_priv(ar);
170 struct ath10k_ce_pipe *ce_state = &ce->ce_states[ce_id];
171 u32 ce_ctrl_addr = ath10k_ce_base_address(ar, ce_id);
172 u32 addr_lo = lower_32_bits(addr);
173
174 ath10k_ce_write32(ar, ce_ctrl_addr +
175 ar->hw_ce_regs->sr_base_addr_lo, addr_lo);
176
177 if (ce_state->ops->ce_set_src_ring_base_addr_hi) {
178 ce_state->ops->ce_set_src_ring_base_addr_hi(ar, ce_ctrl_addr,
179 addr);
180 }
181 }
182
static void ath10k_ce_set_src_ring_base_addr_hi(struct ath10k *ar,
184 u32 ce_ctrl_addr,
185 u64 addr)
186 {
187 u32 addr_hi = upper_32_bits(addr) & CE_DESC_ADDR_HI_MASK;
188
189 ath10k_ce_write32(ar, ce_ctrl_addr +
190 ar->hw_ce_regs->sr_base_addr_hi, addr_hi);
191 }
192
static inline void ath10k_ce_src_ring_size_set(struct ath10k *ar,
194 u32 ce_ctrl_addr,
195 unsigned int n)
196 {
197 ath10k_ce_write32(ar, ce_ctrl_addr +
198 ar->hw_ce_regs->sr_size_addr, n);
199 }
200
static inline void ath10k_ce_src_ring_dmax_set(struct ath10k *ar,
202 u32 ce_ctrl_addr,
203 unsigned int n)
204 {
205 struct ath10k_hw_ce_ctrl1 *ctrl_regs = ar->hw_ce_regs->ctrl1_regs;
206
207 u32 ctrl1_addr = ath10k_ce_read32(ar, ce_ctrl_addr +
208 ctrl_regs->addr);
209
210 ath10k_ce_write32(ar, ce_ctrl_addr + ctrl_regs->addr,
211 (ctrl1_addr & ~(ctrl_regs->dmax->mask)) |
212 ath10k_set_ring_byte(n, ctrl_regs->dmax));
213 }
214
static inline void ath10k_ce_src_ring_byte_swap_set(struct ath10k *ar,
216 u32 ce_ctrl_addr,
217 unsigned int n)
218 {
219 struct ath10k_hw_ce_ctrl1 *ctrl_regs = ar->hw_ce_regs->ctrl1_regs;
220
221 u32 ctrl1_addr = ath10k_ce_read32(ar, ce_ctrl_addr +
222 ctrl_regs->addr);
223
224 ath10k_ce_write32(ar, ce_ctrl_addr + ctrl_regs->addr,
225 (ctrl1_addr & ~(ctrl_regs->src_ring->mask)) |
226 ath10k_set_ring_byte(n, ctrl_regs->src_ring));
227 }
228
static inline void ath10k_ce_dest_ring_byte_swap_set(struct ath10k *ar,
230 u32 ce_ctrl_addr,
231 unsigned int n)
232 {
233 struct ath10k_hw_ce_ctrl1 *ctrl_regs = ar->hw_ce_regs->ctrl1_regs;
234
235 u32 ctrl1_addr = ath10k_ce_read32(ar, ce_ctrl_addr +
236 ctrl_regs->addr);
237
238 ath10k_ce_write32(ar, ce_ctrl_addr + ctrl_regs->addr,
239 (ctrl1_addr & ~(ctrl_regs->dst_ring->mask)) |
240 ath10k_set_ring_byte(n, ctrl_regs->dst_ring));
241 }
242
243 static inline
u32 ath10k_ce_dest_ring_read_index_from_ddr(struct ath10k *ar, u32 ce_id)
245 {
246 struct ath10k_ce *ce = ath10k_ce_priv(ar);
247
248 return (ce->vaddr_rri[ce_id] >> CE_DDR_DRRI_SHIFT) &
249 CE_DDR_RRI_MASK;
250 }
251
static inline u32 ath10k_ce_dest_ring_read_index_get(struct ath10k *ar,
253 u32 ce_ctrl_addr)
254 {
255 struct ath10k_ce *ce = ath10k_ce_priv(ar);
256 u32 ce_id = COPY_ENGINE_ID(ce_ctrl_addr);
257 struct ath10k_ce_pipe *ce_state = &ce->ce_states[ce_id];
258 u32 index;
259
260 if (ar->hw_params.rri_on_ddr &&
261 (ce_state->attr_flags & CE_ATTR_DIS_INTR))
262 index = ath10k_ce_dest_ring_read_index_from_ddr(ar, ce_id);
263 else
264 index = ath10k_ce_read32(ar, ce_ctrl_addr +
265 ar->hw_ce_regs->current_drri_addr);
266
267 return index;
268 }
269
static inline void ath10k_ce_dest_ring_base_addr_set(struct ath10k *ar,
271 u32 ce_id,
272 u64 addr)
273 {
274 struct ath10k_ce *ce = ath10k_ce_priv(ar);
275 struct ath10k_ce_pipe *ce_state = &ce->ce_states[ce_id];
276 u32 ce_ctrl_addr = ath10k_ce_base_address(ar, ce_id);
277 u32 addr_lo = lower_32_bits(addr);
278
279 ath10k_ce_write32(ar, ce_ctrl_addr +
280 ar->hw_ce_regs->dr_base_addr_lo, addr_lo);
281
282 if (ce_state->ops->ce_set_dest_ring_base_addr_hi) {
283 ce_state->ops->ce_set_dest_ring_base_addr_hi(ar, ce_ctrl_addr,
284 addr);
285 }
286 }
287
static void ath10k_ce_set_dest_ring_base_addr_hi(struct ath10k *ar,
289 u32 ce_ctrl_addr,
290 u64 addr)
291 {
292 u32 addr_hi = upper_32_bits(addr) & CE_DESC_ADDR_HI_MASK;
293 u32 reg_value;
294
295 reg_value = ath10k_ce_read32(ar, ce_ctrl_addr +
296 ar->hw_ce_regs->dr_base_addr_hi);
297 reg_value &= ~CE_DESC_ADDR_HI_MASK;
298 reg_value |= addr_hi;
299 ath10k_ce_write32(ar, ce_ctrl_addr +
300 ar->hw_ce_regs->dr_base_addr_hi, reg_value);
301 }
302
static inline void ath10k_ce_dest_ring_size_set(struct ath10k *ar,
304 u32 ce_ctrl_addr,
305 unsigned int n)
306 {
307 ath10k_ce_write32(ar, ce_ctrl_addr +
308 ar->hw_ce_regs->dr_size_addr, n);
309 }
310
static inline void ath10k_ce_src_ring_highmark_set(struct ath10k *ar,
312 u32 ce_ctrl_addr,
313 unsigned int n)
314 {
315 struct ath10k_hw_ce_dst_src_wm_regs *srcr_wm = ar->hw_ce_regs->wm_srcr;
316 u32 addr = ath10k_ce_read32(ar, ce_ctrl_addr + srcr_wm->addr);
317
318 ath10k_ce_write32(ar, ce_ctrl_addr + srcr_wm->addr,
319 (addr & ~(srcr_wm->wm_high->mask)) |
320 (ath10k_set_ring_byte(n, srcr_wm->wm_high)));
321 }
322
static inline void ath10k_ce_src_ring_lowmark_set(struct ath10k *ar,
324 u32 ce_ctrl_addr,
325 unsigned int n)
326 {
327 struct ath10k_hw_ce_dst_src_wm_regs *srcr_wm = ar->hw_ce_regs->wm_srcr;
328 u32 addr = ath10k_ce_read32(ar, ce_ctrl_addr + srcr_wm->addr);
329
330 ath10k_ce_write32(ar, ce_ctrl_addr + srcr_wm->addr,
331 (addr & ~(srcr_wm->wm_low->mask)) |
332 (ath10k_set_ring_byte(n, srcr_wm->wm_low)));
333 }
334
static inline void ath10k_ce_dest_ring_highmark_set(struct ath10k *ar,
336 u32 ce_ctrl_addr,
337 unsigned int n)
338 {
339 struct ath10k_hw_ce_dst_src_wm_regs *dstr_wm = ar->hw_ce_regs->wm_dstr;
340 u32 addr = ath10k_ce_read32(ar, ce_ctrl_addr + dstr_wm->addr);
341
342 ath10k_ce_write32(ar, ce_ctrl_addr + dstr_wm->addr,
343 (addr & ~(dstr_wm->wm_high->mask)) |
344 (ath10k_set_ring_byte(n, dstr_wm->wm_high)));
345 }
346
static inline void ath10k_ce_dest_ring_lowmark_set(struct ath10k *ar,
348 u32 ce_ctrl_addr,
349 unsigned int n)
350 {
351 struct ath10k_hw_ce_dst_src_wm_regs *dstr_wm = ar->hw_ce_regs->wm_dstr;
352 u32 addr = ath10k_ce_read32(ar, ce_ctrl_addr + dstr_wm->addr);
353
354 ath10k_ce_write32(ar, ce_ctrl_addr + dstr_wm->addr,
355 (addr & ~(dstr_wm->wm_low->mask)) |
356 (ath10k_set_ring_byte(n, dstr_wm->wm_low)));
357 }
358
static inline void ath10k_ce_copy_complete_inter_enable(struct ath10k *ar,
360 u32 ce_ctrl_addr)
361 {
362 struct ath10k_hw_ce_host_ie *host_ie = ar->hw_ce_regs->host_ie;
363
364 u32 host_ie_addr = ath10k_ce_read32(ar, ce_ctrl_addr +
365 ar->hw_ce_regs->host_ie_addr);
366
367 ath10k_ce_write32(ar, ce_ctrl_addr + ar->hw_ce_regs->host_ie_addr,
368 host_ie_addr | host_ie->copy_complete->mask);
369 }
370
static inline void ath10k_ce_copy_complete_intr_disable(struct ath10k *ar,
372 u32 ce_ctrl_addr)
373 {
374 struct ath10k_hw_ce_host_ie *host_ie = ar->hw_ce_regs->host_ie;
375
376 u32 host_ie_addr = ath10k_ce_read32(ar, ce_ctrl_addr +
377 ar->hw_ce_regs->host_ie_addr);
378
379 ath10k_ce_write32(ar, ce_ctrl_addr + ar->hw_ce_regs->host_ie_addr,
380 host_ie_addr & ~(host_ie->copy_complete->mask));
381 }
382
static inline void ath10k_ce_watermark_intr_disable(struct ath10k *ar,
384 u32 ce_ctrl_addr)
385 {
386 struct ath10k_hw_ce_host_wm_regs *wm_regs = ar->hw_ce_regs->wm_regs;
387
388 u32 host_ie_addr = ath10k_ce_read32(ar, ce_ctrl_addr +
389 ar->hw_ce_regs->host_ie_addr);
390
391 ath10k_ce_write32(ar, ce_ctrl_addr + ar->hw_ce_regs->host_ie_addr,
392 host_ie_addr & ~(wm_regs->wm_mask));
393 }
394
static inline void ath10k_ce_error_intr_disable(struct ath10k *ar,
396 u32 ce_ctrl_addr)
397 {
398 struct ath10k_hw_ce_misc_regs *misc_regs = ar->hw_ce_regs->misc_regs;
399
400 u32 misc_ie_addr = ath10k_ce_read32(ar,
401 ce_ctrl_addr + ar->hw_ce_regs->misc_ie_addr);
402
403 ath10k_ce_write32(ar,
404 ce_ctrl_addr + ar->hw_ce_regs->misc_ie_addr,
405 misc_ie_addr & ~(misc_regs->err_mask));
406 }
407
static inline void ath10k_ce_engine_int_status_clear(struct ath10k *ar,
409 u32 ce_ctrl_addr,
410 unsigned int mask)
411 {
412 struct ath10k_hw_ce_host_wm_regs *wm_regs = ar->hw_ce_regs->wm_regs;
413
414 ath10k_ce_write32(ar, ce_ctrl_addr + wm_regs->addr, mask);
415 }
416
417 /*
418 * Guts of ath10k_ce_send.
419 * The caller takes responsibility for any needed locking.
420 */
static int _ath10k_ce_send_nolock(struct ath10k_ce_pipe *ce_state,
422 void *per_transfer_context,
423 dma_addr_t buffer,
424 unsigned int nbytes,
425 unsigned int transfer_id,
426 unsigned int flags)
427 {
428 struct ath10k *ar = ce_state->ar;
429 struct ath10k_ce_ring *src_ring = ce_state->src_ring;
430 struct ce_desc *desc, sdesc;
431 unsigned int nentries_mask = src_ring->nentries_mask;
432 unsigned int sw_index = src_ring->sw_index;
433 unsigned int write_index = src_ring->write_index;
434 u32 ctrl_addr = ce_state->ctrl_addr;
435 u32 desc_flags = 0;
436 int ret = 0;
437
	if (nbytes > ce_state->src_sz_max)
		ath10k_warn(ar, "%s: sending more than we can (nbytes: %d, max: %d)\n",
			    __func__, nbytes, ce_state->src_sz_max);
441
442 if (unlikely(CE_RING_DELTA(nentries_mask,
443 write_index, sw_index - 1) <= 0)) {
444 ret = -ENOSR;
445 goto exit;
446 }
447
448 desc = CE_SRC_RING_TO_DESC(src_ring->base_addr_owner_space,
449 write_index);
450
451 desc_flags |= SM(transfer_id, CE_DESC_FLAGS_META_DATA);
452
453 if (flags & CE_SEND_FLAG_GATHER)
454 desc_flags |= CE_DESC_FLAGS_GATHER;
455 if (flags & CE_SEND_FLAG_BYTE_SWAP)
456 desc_flags |= CE_DESC_FLAGS_BYTE_SWAP;
457
458 sdesc.addr = __cpu_to_le32(buffer);
459 sdesc.nbytes = __cpu_to_le16(nbytes);
460 sdesc.flags = __cpu_to_le16(desc_flags);
461
462 *desc = sdesc;
463
464 src_ring->per_transfer_context[write_index] = per_transfer_context;
465
466 /* Update Source Ring Write Index */
467 write_index = CE_RING_IDX_INCR(nentries_mask, write_index);
468
469 /* WORKAROUND */
470 if (!(flags & CE_SEND_FLAG_GATHER))
471 ath10k_ce_src_ring_write_index_set(ar, ctrl_addr, write_index);
472
473 src_ring->write_index = write_index;
474 exit:
475 return ret;
476 }
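/*
 * Worked example of the ring-full check above (numbers are illustrative,
 * assuming CE_RING_DELTA() returns the distance from its second to its
 * third argument modulo the ring size): with an 8-entry ring the mask is
 * 7; if write_index is 5 and sw_index is 6, CE_RING_DELTA(7, 5, 6 - 1)
 * is 0, so the ring is treated as full and -ENOSR is returned. One slot
 * is always left unused so that write_index == sw_index can
 * unambiguously mean "empty".
 */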
477
static int _ath10k_ce_send_nolock_64(struct ath10k_ce_pipe *ce_state,
479 void *per_transfer_context,
480 dma_addr_t buffer,
481 unsigned int nbytes,
482 unsigned int transfer_id,
483 unsigned int flags)
484 {
485 struct ath10k *ar = ce_state->ar;
486 struct ath10k_ce_ring *src_ring = ce_state->src_ring;
487 struct ce_desc_64 *desc, sdesc;
488 unsigned int nentries_mask = src_ring->nentries_mask;
489 unsigned int sw_index;
490 unsigned int write_index = src_ring->write_index;
491 u32 ctrl_addr = ce_state->ctrl_addr;
492 __le32 *addr;
493 u32 desc_flags = 0;
494 int ret = 0;
495
496 if (test_bit(ATH10K_FLAG_CRASH_FLUSH, &ar->dev_flags))
497 return -ESHUTDOWN;
498
	if (nbytes > ce_state->src_sz_max)
		ath10k_warn(ar, "%s: sending more than we can (nbytes: %d, max: %d)\n",
			    __func__, nbytes, ce_state->src_sz_max);
502
503 if (ar->hw_params.rri_on_ddr)
504 sw_index = ath10k_ce_src_ring_read_index_from_ddr(ar, ce_state->id);
505 else
506 sw_index = src_ring->sw_index;
507
508 if (unlikely(CE_RING_DELTA(nentries_mask,
509 write_index, sw_index - 1) <= 0)) {
510 ret = -ENOSR;
511 goto exit;
512 }
513
514 desc = CE_SRC_RING_TO_DESC_64(src_ring->base_addr_owner_space,
515 write_index);
516
517 desc_flags |= SM(transfer_id, CE_DESC_FLAGS_META_DATA);
518
519 if (flags & CE_SEND_FLAG_GATHER)
520 desc_flags |= CE_DESC_FLAGS_GATHER;
521
522 if (flags & CE_SEND_FLAG_BYTE_SWAP)
523 desc_flags |= CE_DESC_FLAGS_BYTE_SWAP;
524
525 addr = (__le32 *)&sdesc.addr;
526
527 flags |= upper_32_bits(buffer) & CE_DESC_ADDR_HI_MASK;
528 addr[0] = __cpu_to_le32(buffer);
529 addr[1] = __cpu_to_le32(flags);
530 if (flags & CE_SEND_FLAG_GATHER)
531 addr[1] |= __cpu_to_le32(CE_WCN3990_DESC_FLAGS_GATHER);
532 else
533 addr[1] &= ~(__cpu_to_le32(CE_WCN3990_DESC_FLAGS_GATHER));
534
535 sdesc.nbytes = __cpu_to_le16(nbytes);
536 sdesc.flags = __cpu_to_le16(desc_flags);
537
538 *desc = sdesc;
539
540 src_ring->per_transfer_context[write_index] = per_transfer_context;
541
542 /* Update Source Ring Write Index */
543 write_index = CE_RING_IDX_INCR(nentries_mask, write_index);
544
545 if (!(flags & CE_SEND_FLAG_GATHER)) {
546 if (ar->hw_params.shadow_reg_support)
547 ath10k_ce_shadow_src_ring_write_index_set(ar, ce_state,
548 write_index);
549 else
550 ath10k_ce_src_ring_write_index_set(ar, ctrl_addr,
551 write_index);
552 }
553
554 src_ring->write_index = write_index;
555 exit:
556 return ret;
557 }
558
int ath10k_ce_send_nolock(struct ath10k_ce_pipe *ce_state,
560 void *per_transfer_context,
561 dma_addr_t buffer,
562 unsigned int nbytes,
563 unsigned int transfer_id,
564 unsigned int flags)
565 {
566 return ce_state->ops->ce_send_nolock(ce_state, per_transfer_context,
567 buffer, nbytes, transfer_id, flags);
568 }
569 EXPORT_SYMBOL(ath10k_ce_send_nolock);
570
void __ath10k_ce_send_revert(struct ath10k_ce_pipe *pipe)
572 {
573 struct ath10k *ar = pipe->ar;
574 struct ath10k_ce *ce = ath10k_ce_priv(ar);
575 struct ath10k_ce_ring *src_ring = pipe->src_ring;
576 u32 ctrl_addr = pipe->ctrl_addr;
577
578 lockdep_assert_held(&ce->ce_lock);
579
580 /*
581 * This function must be called only if there is an incomplete
582 * scatter-gather transfer (before index register is updated)
583 * that needs to be cleaned up.
584 */
585 if (WARN_ON_ONCE(src_ring->write_index == src_ring->sw_index))
586 return;
587
588 if (WARN_ON_ONCE(src_ring->write_index ==
589 ath10k_ce_src_ring_write_index_get(ar, ctrl_addr)))
590 return;
591
592 src_ring->write_index--;
593 src_ring->write_index &= src_ring->nentries_mask;
594
595 src_ring->per_transfer_context[src_ring->write_index] = NULL;
596 }
597 EXPORT_SYMBOL(__ath10k_ce_send_revert);
598
int ath10k_ce_send(struct ath10k_ce_pipe *ce_state,
600 void *per_transfer_context,
601 dma_addr_t buffer,
602 unsigned int nbytes,
603 unsigned int transfer_id,
604 unsigned int flags)
605 {
606 struct ath10k *ar = ce_state->ar;
607 struct ath10k_ce *ce = ath10k_ce_priv(ar);
608 int ret;
609
610 spin_lock_bh(&ce->ce_lock);
611 ret = ath10k_ce_send_nolock(ce_state, per_transfer_context,
612 buffer, nbytes, transfer_id, flags);
613 spin_unlock_bh(&ce->ce_lock);
614
615 return ret;
616 }
617 EXPORT_SYMBOL(ath10k_ce_send);
618
int ath10k_ce_num_free_src_entries(struct ath10k_ce_pipe *pipe)
620 {
621 struct ath10k *ar = pipe->ar;
622 struct ath10k_ce *ce = ath10k_ce_priv(ar);
623 int delta;
624
625 spin_lock_bh(&ce->ce_lock);
626 delta = CE_RING_DELTA(pipe->src_ring->nentries_mask,
627 pipe->src_ring->write_index,
628 pipe->src_ring->sw_index - 1);
629 spin_unlock_bh(&ce->ce_lock);
630
631 return delta;
632 }
633 EXPORT_SYMBOL(ath10k_ce_num_free_src_entries);
634
int __ath10k_ce_rx_num_free_bufs(struct ath10k_ce_pipe *pipe)
636 {
637 struct ath10k *ar = pipe->ar;
638 struct ath10k_ce *ce = ath10k_ce_priv(ar);
639 struct ath10k_ce_ring *dest_ring = pipe->dest_ring;
640 unsigned int nentries_mask = dest_ring->nentries_mask;
641 unsigned int write_index = dest_ring->write_index;
642 unsigned int sw_index = dest_ring->sw_index;
643
644 lockdep_assert_held(&ce->ce_lock);
645
646 return CE_RING_DELTA(nentries_mask, write_index, sw_index - 1);
647 }
648 EXPORT_SYMBOL(__ath10k_ce_rx_num_free_bufs);
649
static int __ath10k_ce_rx_post_buf(struct ath10k_ce_pipe *pipe, void *ctx,
651 dma_addr_t paddr)
652 {
653 struct ath10k *ar = pipe->ar;
654 struct ath10k_ce *ce = ath10k_ce_priv(ar);
655 struct ath10k_ce_ring *dest_ring = pipe->dest_ring;
656 unsigned int nentries_mask = dest_ring->nentries_mask;
657 unsigned int write_index = dest_ring->write_index;
658 unsigned int sw_index = dest_ring->sw_index;
659 struct ce_desc *base = dest_ring->base_addr_owner_space;
660 struct ce_desc *desc = CE_DEST_RING_TO_DESC(base, write_index);
661 u32 ctrl_addr = pipe->ctrl_addr;
662
663 lockdep_assert_held(&ce->ce_lock);
664
665 if ((pipe->id != 5) &&
666 CE_RING_DELTA(nentries_mask, write_index, sw_index - 1) == 0)
667 return -ENOSPC;
668
669 desc->addr = __cpu_to_le32(paddr);
670 desc->nbytes = 0;
671
672 dest_ring->per_transfer_context[write_index] = ctx;
673 write_index = CE_RING_IDX_INCR(nentries_mask, write_index);
674 ath10k_ce_dest_ring_write_index_set(ar, ctrl_addr, write_index);
675 dest_ring->write_index = write_index;
676
677 return 0;
678 }
679
static int __ath10k_ce_rx_post_buf_64(struct ath10k_ce_pipe *pipe,
681 void *ctx,
682 dma_addr_t paddr)
683 {
684 struct ath10k *ar = pipe->ar;
685 struct ath10k_ce *ce = ath10k_ce_priv(ar);
686 struct ath10k_ce_ring *dest_ring = pipe->dest_ring;
687 unsigned int nentries_mask = dest_ring->nentries_mask;
688 unsigned int write_index = dest_ring->write_index;
689 unsigned int sw_index = dest_ring->sw_index;
690 struct ce_desc_64 *base = dest_ring->base_addr_owner_space;
691 struct ce_desc_64 *desc =
692 CE_DEST_RING_TO_DESC_64(base, write_index);
693 u32 ctrl_addr = pipe->ctrl_addr;
694
695 lockdep_assert_held(&ce->ce_lock);
696
697 if (CE_RING_DELTA(nentries_mask, write_index, sw_index - 1) == 0)
698 return -ENOSPC;
699
700 desc->addr = __cpu_to_le64(paddr);
701 desc->addr &= __cpu_to_le64(CE_DESC_ADDR_MASK);
702
703 desc->nbytes = 0;
704
705 dest_ring->per_transfer_context[write_index] = ctx;
706 write_index = CE_RING_IDX_INCR(nentries_mask, write_index);
707 ath10k_ce_dest_ring_write_index_set(ar, ctrl_addr, write_index);
708 dest_ring->write_index = write_index;
709
710 return 0;
711 }
712
void ath10k_ce_rx_update_write_idx(struct ath10k_ce_pipe *pipe, u32 nentries)
714 {
715 struct ath10k *ar = pipe->ar;
716 struct ath10k_ce_ring *dest_ring = pipe->dest_ring;
717 unsigned int nentries_mask = dest_ring->nentries_mask;
718 unsigned int write_index = dest_ring->write_index;
719 u32 ctrl_addr = pipe->ctrl_addr;
720 u32 cur_write_idx = ath10k_ce_dest_ring_write_index_get(ar, ctrl_addr);
721
	/* Prevent the CE ring from getting stuck when it is full by
	 * keeping the write index at least one entry behind the read index.
	 */
725 if (((cur_write_idx + nentries) & nentries_mask) == dest_ring->sw_index)
726 nentries -= 1;
727
728 write_index = CE_RING_IDX_ADD(nentries_mask, write_index, nentries);
729 ath10k_ce_dest_ring_write_index_set(ar, ctrl_addr, write_index);
730 dest_ring->write_index = write_index;
731 }
732 EXPORT_SYMBOL(ath10k_ce_rx_update_write_idx);
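/*
 * Worked example of the guard above (numbers are illustrative): with an
 * 8-entry ring (mask 7), cur_write_idx 3, sw_index 6 and nentries 3, the
 * requested update would make the write index equal to the read index
 * ((3 + 3) & 7 == 6), which hardware could not tell apart from an empty
 * ring; posting one entry fewer keeps the write index one slot behind
 * sw_index.
 */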
733
int ath10k_ce_rx_post_buf(struct ath10k_ce_pipe *pipe, void *ctx,
735 dma_addr_t paddr)
736 {
737 struct ath10k *ar = pipe->ar;
738 struct ath10k_ce *ce = ath10k_ce_priv(ar);
739 int ret;
740
741 spin_lock_bh(&ce->ce_lock);
742 ret = pipe->ops->ce_rx_post_buf(pipe, ctx, paddr);
743 spin_unlock_bh(&ce->ce_lock);
744
745 return ret;
746 }
747 EXPORT_SYMBOL(ath10k_ce_rx_post_buf);
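/*
 * Illustrative sketch (not part of the driver, guarded out of the build):
 * keeping a destination ring stocked with "anonymous receive buffers".
 * The allocation helper and buffer length are assumptions for the
 * example; the real HIF layer manages its own buffer pool.
 */
#if 0
static void example_ce_rx_refill(struct ath10k *ar,
				 struct ath10k_ce_pipe *pipe, size_t buf_len)
{
	struct sk_buff *skb;
	dma_addr_t paddr;

	for (;;) {
		skb = dev_alloc_skb(buf_len);
		if (!skb)
			break;

		paddr = dma_map_single(ar->dev, skb->data, buf_len,
				       DMA_FROM_DEVICE);
		if (dma_mapping_error(ar->dev, paddr)) {
			dev_kfree_skb_any(skb);
			break;
		}

		/* Stops with -ENOSPC once the ring has no free entries. */
		if (ath10k_ce_rx_post_buf(pipe, skb, paddr)) {
			dma_unmap_single(ar->dev, paddr, buf_len,
					 DMA_FROM_DEVICE);
			dev_kfree_skb_any(skb);
			break;
		}
	}
}
#endif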
748
749 /*
750 * Guts of ath10k_ce_completed_recv_next.
751 * The caller takes responsibility for any necessary locking.
752 */
753 static int
_ath10k_ce_completed_recv_next_nolock(struct ath10k_ce_pipe *ce_state,
755 void **per_transfer_contextp,
756 unsigned int *nbytesp)
757 {
758 struct ath10k_ce_ring *dest_ring = ce_state->dest_ring;
759 unsigned int nentries_mask = dest_ring->nentries_mask;
760 unsigned int sw_index = dest_ring->sw_index;
761
762 struct ce_desc *base = dest_ring->base_addr_owner_space;
763 struct ce_desc *desc = CE_DEST_RING_TO_DESC(base, sw_index);
764 struct ce_desc sdesc;
765 u16 nbytes;
766
767 /* Copy in one go for performance reasons */
768 sdesc = *desc;
769
770 nbytes = __le16_to_cpu(sdesc.nbytes);
771 if (nbytes == 0) {
772 /*
773 * This closes a relatively unusual race where the Host
774 * sees the updated DRRI before the update to the
775 * corresponding descriptor has completed. We treat this
776 * as a descriptor that is not yet done.
777 */
778 return -EIO;
779 }
780
781 desc->nbytes = 0;
782
783 /* Return data from completed destination descriptor */
784 *nbytesp = nbytes;
785
786 if (per_transfer_contextp)
787 *per_transfer_contextp =
788 dest_ring->per_transfer_context[sw_index];
789
	/* Copy engine 5 (HTT Rx) will reuse the same transfer context.
	 * So update the transfer context for all CEs except CE5.
	 */
793 if (ce_state->id != 5)
794 dest_ring->per_transfer_context[sw_index] = NULL;
795
796 /* Update sw_index */
797 sw_index = CE_RING_IDX_INCR(nentries_mask, sw_index);
798 dest_ring->sw_index = sw_index;
799
800 return 0;
801 }
802
803 static int
_ath10k_ce_completed_recv_next_nolock_64(struct ath10k_ce_pipe *ce_state,
805 void **per_transfer_contextp,
806 unsigned int *nbytesp)
807 {
808 struct ath10k_ce_ring *dest_ring = ce_state->dest_ring;
809 unsigned int nentries_mask = dest_ring->nentries_mask;
810 unsigned int sw_index = dest_ring->sw_index;
811 struct ce_desc_64 *base = dest_ring->base_addr_owner_space;
812 struct ce_desc_64 *desc =
813 CE_DEST_RING_TO_DESC_64(base, sw_index);
814 struct ce_desc_64 sdesc;
815 u16 nbytes;
816
817 /* Copy in one go for performance reasons */
818 sdesc = *desc;
819
820 nbytes = __le16_to_cpu(sdesc.nbytes);
821 if (nbytes == 0) {
822 /* This closes a relatively unusual race where the Host
823 * sees the updated DRRI before the update to the
824 * corresponding descriptor has completed. We treat this
825 * as a descriptor that is not yet done.
826 */
827 return -EIO;
828 }
829
830 desc->nbytes = 0;
831
832 /* Return data from completed destination descriptor */
833 *nbytesp = nbytes;
834
835 if (per_transfer_contextp)
836 *per_transfer_contextp =
837 dest_ring->per_transfer_context[sw_index];
838
	/* Copy engine 5 (HTT Rx) will reuse the same transfer context.
	 * So update the transfer context for all CEs except CE5.
	 */
842 if (ce_state->id != 5)
843 dest_ring->per_transfer_context[sw_index] = NULL;
844
845 /* Update sw_index */
846 sw_index = CE_RING_IDX_INCR(nentries_mask, sw_index);
847 dest_ring->sw_index = sw_index;
848
849 return 0;
850 }
851
int ath10k_ce_completed_recv_next_nolock(struct ath10k_ce_pipe *ce_state,
853 void **per_transfer_ctx,
854 unsigned int *nbytesp)
855 {
856 return ce_state->ops->ce_completed_recv_next_nolock(ce_state,
857 per_transfer_ctx,
858 nbytesp);
859 }
860 EXPORT_SYMBOL(ath10k_ce_completed_recv_next_nolock);
861
int ath10k_ce_completed_recv_next(struct ath10k_ce_pipe *ce_state,
863 void **per_transfer_contextp,
864 unsigned int *nbytesp)
865 {
866 struct ath10k *ar = ce_state->ar;
867 struct ath10k_ce *ce = ath10k_ce_priv(ar);
868 int ret;
869
870 spin_lock_bh(&ce->ce_lock);
871 ret = ce_state->ops->ce_completed_recv_next_nolock(ce_state,
872 per_transfer_contextp,
873 nbytesp);
874
875 spin_unlock_bh(&ce->ce_lock);
876
877 return ret;
878 }
879 EXPORT_SYMBOL(ath10k_ce_completed_recv_next);
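/*
 * Illustrative sketch (not part of the driver, guarded out of the build):
 * draining completed receives from a pipe. The per-buffer handler is an
 * assumption for the example.
 */
#if 0
static void example_ce_recv_drain(struct ath10k_ce_pipe *pipe,
				  void (*handle)(void *ctx, unsigned int len))
{
	void *ctx;
	unsigned int nbytes;

	/* Each iteration consumes one completed destination descriptor and
	 * returns the per-transfer context that was posted with the buffer.
	 */
	while (ath10k_ce_completed_recv_next(pipe, &ctx, &nbytes) == 0)
		handle(ctx, nbytes);
}
#endif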
880
static int _ath10k_ce_revoke_recv_next(struct ath10k_ce_pipe *ce_state,
882 void **per_transfer_contextp,
883 dma_addr_t *bufferp)
884 {
885 struct ath10k_ce_ring *dest_ring;
886 unsigned int nentries_mask;
887 unsigned int sw_index;
888 unsigned int write_index;
889 int ret;
890 struct ath10k *ar;
891 struct ath10k_ce *ce;
892
893 dest_ring = ce_state->dest_ring;
894
895 if (!dest_ring)
896 return -EIO;
897
898 ar = ce_state->ar;
899 ce = ath10k_ce_priv(ar);
900
901 spin_lock_bh(&ce->ce_lock);
902
903 nentries_mask = dest_ring->nentries_mask;
904 sw_index = dest_ring->sw_index;
905 write_index = dest_ring->write_index;
906 if (write_index != sw_index) {
907 struct ce_desc *base = dest_ring->base_addr_owner_space;
908 struct ce_desc *desc = CE_DEST_RING_TO_DESC(base, sw_index);
909
910 /* Return data from completed destination descriptor */
911 *bufferp = __le32_to_cpu(desc->addr);
912
913 if (per_transfer_contextp)
914 *per_transfer_contextp =
915 dest_ring->per_transfer_context[sw_index];
916
917 /* sanity */
918 dest_ring->per_transfer_context[sw_index] = NULL;
919 desc->nbytes = 0;
920
921 /* Update sw_index */
922 sw_index = CE_RING_IDX_INCR(nentries_mask, sw_index);
923 dest_ring->sw_index = sw_index;
924 ret = 0;
925 } else {
926 ret = -EIO;
927 }
928
929 spin_unlock_bh(&ce->ce_lock);
930
931 return ret;
932 }
933
static int _ath10k_ce_revoke_recv_next_64(struct ath10k_ce_pipe *ce_state,
935 void **per_transfer_contextp,
936 dma_addr_t *bufferp)
937 {
938 struct ath10k_ce_ring *dest_ring;
939 unsigned int nentries_mask;
940 unsigned int sw_index;
941 unsigned int write_index;
942 int ret;
943 struct ath10k *ar;
944 struct ath10k_ce *ce;
945
946 dest_ring = ce_state->dest_ring;
947
948 if (!dest_ring)
949 return -EIO;
950
951 ar = ce_state->ar;
952 ce = ath10k_ce_priv(ar);
953
954 spin_lock_bh(&ce->ce_lock);
955
956 nentries_mask = dest_ring->nentries_mask;
957 sw_index = dest_ring->sw_index;
958 write_index = dest_ring->write_index;
959 if (write_index != sw_index) {
960 struct ce_desc_64 *base = dest_ring->base_addr_owner_space;
961 struct ce_desc_64 *desc =
962 CE_DEST_RING_TO_DESC_64(base, sw_index);
963
964 /* Return data from completed destination descriptor */
965 *bufferp = __le64_to_cpu(desc->addr);
966
967 if (per_transfer_contextp)
968 *per_transfer_contextp =
969 dest_ring->per_transfer_context[sw_index];
970
971 /* sanity */
972 dest_ring->per_transfer_context[sw_index] = NULL;
973 desc->nbytes = 0;
974
975 /* Update sw_index */
976 sw_index = CE_RING_IDX_INCR(nentries_mask, sw_index);
977 dest_ring->sw_index = sw_index;
978 ret = 0;
979 } else {
980 ret = -EIO;
981 }
982
983 spin_unlock_bh(&ce->ce_lock);
984
985 return ret;
986 }
987
int ath10k_ce_revoke_recv_next(struct ath10k_ce_pipe *ce_state,
989 void **per_transfer_contextp,
990 dma_addr_t *bufferp)
991 {
992 return ce_state->ops->ce_revoke_recv_next(ce_state,
993 per_transfer_contextp,
994 bufferp);
995 }
996 EXPORT_SYMBOL(ath10k_ce_revoke_recv_next);
997
998 /*
999 * Guts of ath10k_ce_completed_send_next.
1000 * The caller takes responsibility for any necessary locking.
1001 */
static int _ath10k_ce_completed_send_next_nolock(struct ath10k_ce_pipe *ce_state,
1003 void **per_transfer_contextp)
1004 {
1005 struct ath10k_ce_ring *src_ring = ce_state->src_ring;
1006 u32 ctrl_addr = ce_state->ctrl_addr;
1007 struct ath10k *ar = ce_state->ar;
1008 unsigned int nentries_mask = src_ring->nentries_mask;
1009 unsigned int sw_index = src_ring->sw_index;
1010 unsigned int read_index;
1011 struct ce_desc *desc;
1012
1013 if (src_ring->hw_index == sw_index) {
1014 /*
1015 * The SW completion index has caught up with the cached
1016 * version of the HW completion index.
1017 * Update the cached HW completion index to see whether
1018 * the SW has really caught up to the HW, or if the cached
1019 * value of the HW index has become stale.
1020 */
1021
1022 read_index = ath10k_ce_src_ring_read_index_get(ar, ctrl_addr);
1023 if (read_index == 0xffffffff)
1024 return -ENODEV;
1025
1026 read_index &= nentries_mask;
1027 src_ring->hw_index = read_index;
1028 }
1029
1030 if (ar->hw_params.rri_on_ddr)
1031 read_index = ath10k_ce_src_ring_read_index_get(ar, ctrl_addr);
1032 else
1033 read_index = src_ring->hw_index;
1034
1035 if (read_index == sw_index)
1036 return -EIO;
1037
1038 if (per_transfer_contextp)
1039 *per_transfer_contextp =
1040 src_ring->per_transfer_context[sw_index];
1041
1042 /* sanity */
1043 src_ring->per_transfer_context[sw_index] = NULL;
1044 desc = CE_SRC_RING_TO_DESC(src_ring->base_addr_owner_space,
1045 sw_index);
1046 desc->nbytes = 0;
1047
1048 /* Update sw_index */
1049 sw_index = CE_RING_IDX_INCR(nentries_mask, sw_index);
1050 src_ring->sw_index = sw_index;
1051
1052 return 0;
1053 }
1054
static int _ath10k_ce_completed_send_next_nolock_64(struct ath10k_ce_pipe *ce_state,
1056 void **per_transfer_contextp)
1057 {
1058 struct ath10k_ce_ring *src_ring = ce_state->src_ring;
1059 u32 ctrl_addr = ce_state->ctrl_addr;
1060 struct ath10k *ar = ce_state->ar;
1061 unsigned int nentries_mask = src_ring->nentries_mask;
1062 unsigned int sw_index = src_ring->sw_index;
1063 unsigned int read_index;
1064 struct ce_desc_64 *desc;
1065
1066 if (src_ring->hw_index == sw_index) {
1067 /*
1068 * The SW completion index has caught up with the cached
1069 * version of the HW completion index.
1070 * Update the cached HW completion index to see whether
1071 * the SW has really caught up to the HW, or if the cached
1072 * value of the HW index has become stale.
1073 */
1074
1075 read_index = ath10k_ce_src_ring_read_index_get(ar, ctrl_addr);
1076 if (read_index == 0xffffffff)
1077 return -ENODEV;
1078
1079 read_index &= nentries_mask;
1080 src_ring->hw_index = read_index;
1081 }
1082
1083 if (ar->hw_params.rri_on_ddr)
1084 read_index = ath10k_ce_src_ring_read_index_get(ar, ctrl_addr);
1085 else
1086 read_index = src_ring->hw_index;
1087
1088 if (read_index == sw_index)
1089 return -EIO;
1090
1091 if (per_transfer_contextp)
1092 *per_transfer_contextp =
1093 src_ring->per_transfer_context[sw_index];
1094
1095 /* sanity */
1096 src_ring->per_transfer_context[sw_index] = NULL;
1097 desc = CE_SRC_RING_TO_DESC_64(src_ring->base_addr_owner_space,
1098 sw_index);
1099 desc->nbytes = 0;
1100
1101 /* Update sw_index */
1102 sw_index = CE_RING_IDX_INCR(nentries_mask, sw_index);
1103 src_ring->sw_index = sw_index;
1104
1105 return 0;
1106 }
1107
int ath10k_ce_completed_send_next_nolock(struct ath10k_ce_pipe *ce_state,
1109 void **per_transfer_contextp)
1110 {
1111 return ce_state->ops->ce_completed_send_next_nolock(ce_state,
1112 per_transfer_contextp);
1113 }
1114 EXPORT_SYMBOL(ath10k_ce_completed_send_next_nolock);
1115
static void ath10k_ce_extract_desc_data(struct ath10k *ar,
1117 struct ath10k_ce_ring *src_ring,
1118 u32 sw_index,
1119 dma_addr_t *bufferp,
1120 u32 *nbytesp,
1121 u32 *transfer_idp)
1122 {
1123 struct ce_desc *base = src_ring->base_addr_owner_space;
1124 struct ce_desc *desc = CE_SRC_RING_TO_DESC(base, sw_index);
1125
1126 /* Return data from completed source descriptor */
1127 *bufferp = __le32_to_cpu(desc->addr);
1128 *nbytesp = __le16_to_cpu(desc->nbytes);
1129 *transfer_idp = MS(__le16_to_cpu(desc->flags),
1130 CE_DESC_FLAGS_META_DATA);
1131 }
1132
static void ath10k_ce_extract_desc_data_64(struct ath10k *ar,
1134 struct ath10k_ce_ring *src_ring,
1135 u32 sw_index,
1136 dma_addr_t *bufferp,
1137 u32 *nbytesp,
1138 u32 *transfer_idp)
1139 {
1140 struct ce_desc_64 *base = src_ring->base_addr_owner_space;
1141 struct ce_desc_64 *desc =
1142 CE_SRC_RING_TO_DESC_64(base, sw_index);
1143
1144 /* Return data from completed source descriptor */
1145 *bufferp = __le64_to_cpu(desc->addr);
1146 *nbytesp = __le16_to_cpu(desc->nbytes);
1147 *transfer_idp = MS(__le16_to_cpu(desc->flags),
1148 CE_DESC_FLAGS_META_DATA);
1149 }
1150
1151 /* NB: Modeled after ath10k_ce_completed_send_next */
int ath10k_ce_cancel_send_next(struct ath10k_ce_pipe *ce_state,
1153 void **per_transfer_contextp,
1154 dma_addr_t *bufferp,
1155 unsigned int *nbytesp,
1156 unsigned int *transfer_idp)
1157 {
1158 struct ath10k_ce_ring *src_ring;
1159 unsigned int nentries_mask;
1160 unsigned int sw_index;
1161 unsigned int write_index;
1162 int ret;
1163 struct ath10k *ar;
1164 struct ath10k_ce *ce;
1165
1166 src_ring = ce_state->src_ring;
1167
1168 if (!src_ring)
1169 return -EIO;
1170
1171 ar = ce_state->ar;
1172 ce = ath10k_ce_priv(ar);
1173
1174 spin_lock_bh(&ce->ce_lock);
1175
1176 nentries_mask = src_ring->nentries_mask;
1177 sw_index = src_ring->sw_index;
1178 write_index = src_ring->write_index;
1179
1180 if (write_index != sw_index) {
1181 ce_state->ops->ce_extract_desc_data(ar, src_ring, sw_index,
1182 bufferp, nbytesp,
1183 transfer_idp);
1184
1185 if (per_transfer_contextp)
1186 *per_transfer_contextp =
1187 src_ring->per_transfer_context[sw_index];
1188
1189 /* sanity */
1190 src_ring->per_transfer_context[sw_index] = NULL;
1191
1192 /* Update sw_index */
1193 sw_index = CE_RING_IDX_INCR(nentries_mask, sw_index);
1194 src_ring->sw_index = sw_index;
1195 ret = 0;
1196 } else {
1197 ret = -EIO;
1198 }
1199
1200 spin_unlock_bh(&ce->ce_lock);
1201
1202 return ret;
1203 }
1204 EXPORT_SYMBOL(ath10k_ce_cancel_send_next);
1205
int ath10k_ce_completed_send_next(struct ath10k_ce_pipe *ce_state,
1207 void **per_transfer_contextp)
1208 {
1209 struct ath10k *ar = ce_state->ar;
1210 struct ath10k_ce *ce = ath10k_ce_priv(ar);
1211 int ret;
1212
1213 spin_lock_bh(&ce->ce_lock);
1214 ret = ath10k_ce_completed_send_next_nolock(ce_state,
1215 per_transfer_contextp);
1216 spin_unlock_bh(&ce->ce_lock);
1217
1218 return ret;
1219 }
1220 EXPORT_SYMBOL(ath10k_ce_completed_send_next);
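/*
 * Illustrative sketch (not part of the driver, guarded out of the build):
 * reclaiming send completions. Here the per-transfer context is assumed
 * to be an skb, as in the send example near the top of this file; DMA
 * unmapping is elided for brevity.
 */
#if 0
static void example_ce_send_reclaim(struct ath10k_ce_pipe *pipe)
{
	void *ctx;

	while (ath10k_ce_completed_send_next(pipe, &ctx) == 0)
		dev_kfree_skb_any(ctx);	/* ctx was passed to ath10k_ce_send() */
}
#endif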
1221
1222 /*
1223 * Guts of interrupt handler for per-engine interrupts on a particular CE.
1224 *
1225 * Invokes registered callbacks for recv_complete,
1226 * send_complete, and watermarks.
1227 */
void ath10k_ce_per_engine_service(struct ath10k *ar, unsigned int ce_id)
1229 {
1230 struct ath10k_ce *ce = ath10k_ce_priv(ar);
1231 struct ath10k_ce_pipe *ce_state = &ce->ce_states[ce_id];
1232 struct ath10k_hw_ce_host_wm_regs *wm_regs = ar->hw_ce_regs->wm_regs;
1233 u32 ctrl_addr = ce_state->ctrl_addr;
1234
1235 /*
1236 * Clear before handling
1237 *
1238 * Misc CE interrupts are not being handled, but still need
1239 * to be cleared.
1240 *
1241 * NOTE: When the last copy engine interrupt is cleared the
1242 * hardware will go to sleep. Once this happens any access to
1243 * the CE registers can cause a hardware fault.
1244 */
1245 ath10k_ce_engine_int_status_clear(ar, ctrl_addr,
1246 wm_regs->cc_mask | wm_regs->wm_mask);
1247
1248 if (ce_state->recv_cb)
1249 ce_state->recv_cb(ce_state);
1250
1251 if (ce_state->send_cb)
1252 ce_state->send_cb(ce_state);
1253 }
1254 EXPORT_SYMBOL(ath10k_ce_per_engine_service);
1255
1256 /*
1257 * Handler for per-engine interrupts on ALL active CEs.
1258 * This is used in cases where the system is sharing a
1259 * single interrupt for all CEs
1260 */
1261
void ath10k_ce_per_engine_service_any(struct ath10k *ar)
1263 {
1264 int ce_id;
1265 u32 intr_summary;
1266
1267 intr_summary = ath10k_ce_interrupt_summary(ar);
1268
1269 for (ce_id = 0; intr_summary && (ce_id < CE_COUNT); ce_id++) {
1270 if (intr_summary & (1 << ce_id))
1271 intr_summary &= ~(1 << ce_id);
1272 else
1273 /* no intr pending on this CE */
1274 continue;
1275
1276 ath10k_ce_per_engine_service(ar, ce_id);
1277 }
1278 }
1279 EXPORT_SYMBOL(ath10k_ce_per_engine_service_any);
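/*
 * Illustrative sketch (not part of the driver, guarded out of the build):
 * a shared-interrupt handler that lets the helper above fan out to every
 * CE with a pending interrupt. Real bus code also masks/acks the
 * interrupt at the bus level; that part is omitted here.
 */
#if 0
static irqreturn_t example_ce_shared_irq(int irq, void *arg)
{
	struct ath10k *ar = arg;

	ath10k_ce_per_engine_service_any(ar);

	return IRQ_HANDLED;
}
#endif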
1280
1281 /*
1282 * Adjust interrupts for the copy complete handler.
1283 * If it's needed for either send or recv, then unmask
1284 * this interrupt; otherwise, mask it.
1285 *
1286 * Called with ce_lock held.
1287 */
static void ath10k_ce_per_engine_handler_adjust(struct ath10k_ce_pipe *ce_state)
1289 {
1290 u32 ctrl_addr = ce_state->ctrl_addr;
1291 struct ath10k *ar = ce_state->ar;
1292 bool disable_copy_compl_intr = ce_state->attr_flags & CE_ATTR_DIS_INTR;
1293
1294 if ((!disable_copy_compl_intr) &&
1295 (ce_state->send_cb || ce_state->recv_cb))
1296 ath10k_ce_copy_complete_inter_enable(ar, ctrl_addr);
1297 else
1298 ath10k_ce_copy_complete_intr_disable(ar, ctrl_addr);
1299
1300 ath10k_ce_watermark_intr_disable(ar, ctrl_addr);
1301 }
1302
void ath10k_ce_disable_interrupt(struct ath10k *ar, int ce_id)
1304 {
1305 struct ath10k_ce *ce = ath10k_ce_priv(ar);
1306 struct ath10k_ce_pipe *ce_state;
1307 u32 ctrl_addr;
1308
1309 ce_state = &ce->ce_states[ce_id];
1310 if (ce_state->attr_flags & CE_ATTR_POLL)
1311 return;
1312
1313 ctrl_addr = ath10k_ce_base_address(ar, ce_id);
1314
1315 ath10k_ce_copy_complete_intr_disable(ar, ctrl_addr);
1316 ath10k_ce_error_intr_disable(ar, ctrl_addr);
1317 ath10k_ce_watermark_intr_disable(ar, ctrl_addr);
1318 }
1319 EXPORT_SYMBOL(ath10k_ce_disable_interrupt);
1320
void ath10k_ce_disable_interrupts(struct ath10k *ar)
1322 {
1323 int ce_id;
1324
1325 for (ce_id = 0; ce_id < CE_COUNT; ce_id++)
1326 ath10k_ce_disable_interrupt(ar, ce_id);
1327 }
1328 EXPORT_SYMBOL(ath10k_ce_disable_interrupts);
1329
void ath10k_ce_enable_interrupt(struct ath10k *ar, int ce_id)
1331 {
1332 struct ath10k_ce *ce = ath10k_ce_priv(ar);
1333 struct ath10k_ce_pipe *ce_state;
1334
1335 ce_state = &ce->ce_states[ce_id];
1336 if (ce_state->attr_flags & CE_ATTR_POLL)
1337 return;
1338
1339 ath10k_ce_per_engine_handler_adjust(ce_state);
1340 }
1341 EXPORT_SYMBOL(ath10k_ce_enable_interrupt);
1342
void ath10k_ce_enable_interrupts(struct ath10k *ar)
1344 {
1345 int ce_id;
1346
	/* Enable interrupts for the copy engines that are not using
	 * polling mode.
	 */
1350 for (ce_id = 0; ce_id < CE_COUNT; ce_id++)
1351 ath10k_ce_enable_interrupt(ar, ce_id);
1352 }
1353 EXPORT_SYMBOL(ath10k_ce_enable_interrupts);
1354
static int ath10k_ce_init_src_ring(struct ath10k *ar,
1356 unsigned int ce_id,
1357 const struct ce_attr *attr)
1358 {
1359 struct ath10k_ce *ce = ath10k_ce_priv(ar);
1360 struct ath10k_ce_pipe *ce_state = &ce->ce_states[ce_id];
1361 struct ath10k_ce_ring *src_ring = ce_state->src_ring;
1362 u32 nentries, ctrl_addr = ath10k_ce_base_address(ar, ce_id);
1363
1364 nentries = roundup_pow_of_two(attr->src_nentries);
1365
1366 if (ar->hw_params.target_64bit)
1367 memset(src_ring->base_addr_owner_space, 0,
1368 nentries * sizeof(struct ce_desc_64));
1369 else
1370 memset(src_ring->base_addr_owner_space, 0,
1371 nentries * sizeof(struct ce_desc));
1372
1373 src_ring->sw_index = ath10k_ce_src_ring_read_index_get(ar, ctrl_addr);
1374 src_ring->sw_index &= src_ring->nentries_mask;
1375 src_ring->hw_index = src_ring->sw_index;
1376
1377 src_ring->write_index =
1378 ath10k_ce_src_ring_write_index_get(ar, ctrl_addr);
1379 src_ring->write_index &= src_ring->nentries_mask;
1380
1381 ath10k_ce_src_ring_base_addr_set(ar, ce_id,
1382 src_ring->base_addr_ce_space);
1383 ath10k_ce_src_ring_size_set(ar, ctrl_addr, nentries);
1384 ath10k_ce_src_ring_dmax_set(ar, ctrl_addr, attr->src_sz_max);
1385 ath10k_ce_src_ring_byte_swap_set(ar, ctrl_addr, 0);
1386 ath10k_ce_src_ring_lowmark_set(ar, ctrl_addr, 0);
1387 ath10k_ce_src_ring_highmark_set(ar, ctrl_addr, nentries);
1388
1389 ath10k_dbg(ar, ATH10K_DBG_BOOT,
1390 "boot init ce src ring id %d entries %d base_addr %pK\n",
1391 ce_id, nentries, src_ring->base_addr_owner_space);
1392
1393 return 0;
1394 }
1395
static int ath10k_ce_init_dest_ring(struct ath10k *ar,
1397 unsigned int ce_id,
1398 const struct ce_attr *attr)
1399 {
1400 struct ath10k_ce *ce = ath10k_ce_priv(ar);
1401 struct ath10k_ce_pipe *ce_state = &ce->ce_states[ce_id];
1402 struct ath10k_ce_ring *dest_ring = ce_state->dest_ring;
1403 u32 nentries, ctrl_addr = ath10k_ce_base_address(ar, ce_id);
1404
1405 nentries = roundup_pow_of_two(attr->dest_nentries);
1406
1407 if (ar->hw_params.target_64bit)
1408 memset(dest_ring->base_addr_owner_space, 0,
1409 nentries * sizeof(struct ce_desc_64));
1410 else
1411 memset(dest_ring->base_addr_owner_space, 0,
1412 nentries * sizeof(struct ce_desc));
1413
1414 dest_ring->sw_index = ath10k_ce_dest_ring_read_index_get(ar, ctrl_addr);
1415 dest_ring->sw_index &= dest_ring->nentries_mask;
1416 dest_ring->write_index =
1417 ath10k_ce_dest_ring_write_index_get(ar, ctrl_addr);
1418 dest_ring->write_index &= dest_ring->nentries_mask;
1419
1420 ath10k_ce_dest_ring_base_addr_set(ar, ce_id,
1421 dest_ring->base_addr_ce_space);
1422 ath10k_ce_dest_ring_size_set(ar, ctrl_addr, nentries);
1423 ath10k_ce_dest_ring_byte_swap_set(ar, ctrl_addr, 0);
1424 ath10k_ce_dest_ring_lowmark_set(ar, ctrl_addr, 0);
1425 ath10k_ce_dest_ring_highmark_set(ar, ctrl_addr, nentries);
1426
1427 ath10k_dbg(ar, ATH10K_DBG_BOOT,
1428 "boot ce dest ring id %d entries %d base_addr %pK\n",
1429 ce_id, nentries, dest_ring->base_addr_owner_space);
1430
1431 return 0;
1432 }
1433
static int ath10k_ce_alloc_shadow_base(struct ath10k *ar,
1435 struct ath10k_ce_ring *src_ring,
1436 u32 nentries)
1437 {
1438 src_ring->shadow_base_unaligned = kcalloc(nentries,
1439 sizeof(struct ce_desc_64),
1440 GFP_KERNEL);
1441 if (!src_ring->shadow_base_unaligned)
1442 return -ENOMEM;
1443
1444 src_ring->shadow_base = (struct ce_desc_64 *)
1445 PTR_ALIGN(src_ring->shadow_base_unaligned,
1446 CE_DESC_RING_ALIGN);
1447 return 0;
1448 }
1449
1450 static struct ath10k_ce_ring *
ath10k_ce_alloc_src_ring(struct ath10k *ar, unsigned int ce_id,
1452 const struct ce_attr *attr)
1453 {
1454 struct ath10k_ce_ring *src_ring;
1455 u32 nentries = attr->src_nentries;
1456 dma_addr_t base_addr;
1457 int ret;
1458
1459 nentries = roundup_pow_of_two(nentries);
1460
1461 src_ring = kzalloc(struct_size(src_ring, per_transfer_context,
1462 nentries), GFP_KERNEL);
1463 if (src_ring == NULL)
1464 return ERR_PTR(-ENOMEM);
1465
1466 src_ring->nentries = nentries;
1467 src_ring->nentries_mask = nentries - 1;
1468
1469 /*
1470 * Legacy platforms that do not support cache
1471 * coherent DMA are unsupported
1472 */
1473 src_ring->base_addr_owner_space_unaligned =
1474 dma_alloc_coherent(ar->dev,
1475 (nentries * sizeof(struct ce_desc) +
1476 CE_DESC_RING_ALIGN),
1477 &base_addr, GFP_KERNEL);
1478 if (!src_ring->base_addr_owner_space_unaligned) {
1479 kfree(src_ring);
1480 return ERR_PTR(-ENOMEM);
1481 }
1482
1483 src_ring->base_addr_ce_space_unaligned = base_addr;
1484
1485 src_ring->base_addr_owner_space =
1486 PTR_ALIGN(src_ring->base_addr_owner_space_unaligned,
1487 CE_DESC_RING_ALIGN);
1488 src_ring->base_addr_ce_space =
1489 ALIGN(src_ring->base_addr_ce_space_unaligned,
1490 CE_DESC_RING_ALIGN);
1491
1492 if (ar->hw_params.shadow_reg_support) {
1493 ret = ath10k_ce_alloc_shadow_base(ar, src_ring, nentries);
1494 if (ret) {
1495 dma_free_coherent(ar->dev,
1496 (nentries * sizeof(struct ce_desc) +
1497 CE_DESC_RING_ALIGN),
1498 src_ring->base_addr_owner_space_unaligned,
1499 base_addr);
1500 kfree(src_ring);
1501 return ERR_PTR(ret);
1502 }
1503 }
1504
1505 return src_ring;
1506 }
1507
1508 static struct ath10k_ce_ring *
ath10k_ce_alloc_src_ring_64(struct ath10k *ar, unsigned int ce_id,
1510 const struct ce_attr *attr)
1511 {
1512 struct ath10k_ce_ring *src_ring;
1513 u32 nentries = attr->src_nentries;
1514 dma_addr_t base_addr;
1515 int ret;
1516
1517 nentries = roundup_pow_of_two(nentries);
1518
1519 src_ring = kzalloc(struct_size(src_ring, per_transfer_context,
1520 nentries), GFP_KERNEL);
1521 if (!src_ring)
1522 return ERR_PTR(-ENOMEM);
1523
1524 src_ring->nentries = nentries;
1525 src_ring->nentries_mask = nentries - 1;
1526
1527 /* Legacy platforms that do not support cache
1528 * coherent DMA are unsupported
1529 */
1530 src_ring->base_addr_owner_space_unaligned =
1531 dma_alloc_coherent(ar->dev,
1532 (nentries * sizeof(struct ce_desc_64) +
1533 CE_DESC_RING_ALIGN),
1534 &base_addr, GFP_KERNEL);
1535 if (!src_ring->base_addr_owner_space_unaligned) {
1536 kfree(src_ring);
1537 return ERR_PTR(-ENOMEM);
1538 }
1539
1540 src_ring->base_addr_ce_space_unaligned = base_addr;
1541
1542 src_ring->base_addr_owner_space =
1543 PTR_ALIGN(src_ring->base_addr_owner_space_unaligned,
1544 CE_DESC_RING_ALIGN);
1545 src_ring->base_addr_ce_space =
1546 ALIGN(src_ring->base_addr_ce_space_unaligned,
1547 CE_DESC_RING_ALIGN);
1548
1549 if (ar->hw_params.shadow_reg_support) {
1550 ret = ath10k_ce_alloc_shadow_base(ar, src_ring, nentries);
1551 if (ret) {
1552 dma_free_coherent(ar->dev,
1553 (nentries * sizeof(struct ce_desc_64) +
1554 CE_DESC_RING_ALIGN),
1555 src_ring->base_addr_owner_space_unaligned,
1556 base_addr);
1557 kfree(src_ring);
1558 return ERR_PTR(ret);
1559 }
1560 }
1561
1562 return src_ring;
1563 }
1564
1565 static struct ath10k_ce_ring *
ath10k_ce_alloc_dest_ring(struct ath10k *ar, unsigned int ce_id,
1567 const struct ce_attr *attr)
1568 {
1569 struct ath10k_ce_ring *dest_ring;
1570 u32 nentries;
1571 dma_addr_t base_addr;
1572
1573 nentries = roundup_pow_of_two(attr->dest_nentries);
1574
1575 dest_ring = kzalloc(struct_size(dest_ring, per_transfer_context,
1576 nentries), GFP_KERNEL);
1577 if (dest_ring == NULL)
1578 return ERR_PTR(-ENOMEM);
1579
1580 dest_ring->nentries = nentries;
1581 dest_ring->nentries_mask = nentries - 1;
1582
1583 /*
1584 * Legacy platforms that do not support cache
1585 * coherent DMA are unsupported
1586 */
1587 dest_ring->base_addr_owner_space_unaligned =
1588 dma_alloc_coherent(ar->dev,
1589 (nentries * sizeof(struct ce_desc) +
1590 CE_DESC_RING_ALIGN),
1591 &base_addr, GFP_KERNEL);
1592 if (!dest_ring->base_addr_owner_space_unaligned) {
1593 kfree(dest_ring);
1594 return ERR_PTR(-ENOMEM);
1595 }
1596
1597 dest_ring->base_addr_ce_space_unaligned = base_addr;
1598
1599 dest_ring->base_addr_owner_space =
1600 PTR_ALIGN(dest_ring->base_addr_owner_space_unaligned,
1601 CE_DESC_RING_ALIGN);
1602 dest_ring->base_addr_ce_space =
1603 ALIGN(dest_ring->base_addr_ce_space_unaligned,
1604 CE_DESC_RING_ALIGN);
1605
1606 return dest_ring;
1607 }
1608
1609 static struct ath10k_ce_ring *
ath10k_ce_alloc_dest_ring_64(struct ath10k *ar, unsigned int ce_id,
1611 const struct ce_attr *attr)
1612 {
1613 struct ath10k_ce_ring *dest_ring;
1614 u32 nentries;
1615 dma_addr_t base_addr;
1616
1617 nentries = roundup_pow_of_two(attr->dest_nentries);
1618
1619 dest_ring = kzalloc(struct_size(dest_ring, per_transfer_context,
1620 nentries), GFP_KERNEL);
1621 if (!dest_ring)
1622 return ERR_PTR(-ENOMEM);
1623
1624 dest_ring->nentries = nentries;
1625 dest_ring->nentries_mask = nentries - 1;
1626
1627 /* Legacy platforms that do not support cache
1628 * coherent DMA are unsupported
1629 */
1630 dest_ring->base_addr_owner_space_unaligned =
1631 dma_alloc_coherent(ar->dev,
1632 (nentries * sizeof(struct ce_desc_64) +
1633 CE_DESC_RING_ALIGN),
1634 &base_addr, GFP_KERNEL);
1635 if (!dest_ring->base_addr_owner_space_unaligned) {
1636 kfree(dest_ring);
1637 return ERR_PTR(-ENOMEM);
1638 }
1639
1640 dest_ring->base_addr_ce_space_unaligned = base_addr;
1641
	/* Zero-initialize the memory to prevent garbage data from
	 * crashing the system during firmware download.
	 */
1645 dest_ring->base_addr_owner_space =
1646 PTR_ALIGN(dest_ring->base_addr_owner_space_unaligned,
1647 CE_DESC_RING_ALIGN);
1648 dest_ring->base_addr_ce_space =
1649 ALIGN(dest_ring->base_addr_ce_space_unaligned,
1650 CE_DESC_RING_ALIGN);
1651
1652 return dest_ring;
1653 }
1654
1655 /*
1656 * Initialize a Copy Engine based on caller-supplied attributes.
1657 * This may be called once to initialize both source and destination
1658 * rings or it may be called twice for separate source and destination
1659 * initialization. It may be that only one side or the other is
1660 * initialized by software/firmware.
1661 */
1662 int ath10k_ce_init_pipe(struct ath10k *ar, unsigned int ce_id,
1663 const struct ce_attr *attr)
1664 {
1665 int ret;
1666
1667 if (attr->src_nentries) {
1668 ret = ath10k_ce_init_src_ring(ar, ce_id, attr);
1669 if (ret) {
1670 ath10k_err(ar, "Failed to initialize CE src ring for ID: %d (%d)\n",
1671 ce_id, ret);
1672 return ret;
1673 }
1674 }
1675
1676 if (attr->dest_nentries) {
1677 ret = ath10k_ce_init_dest_ring(ar, ce_id, attr);
1678 if (ret) {
1679 ath10k_err(ar, "Failed to initialize CE dest ring for ID: %d (%d)\n",
1680 ce_id, ret);
1681 return ret;
1682 }
1683 }
1684
1685 return 0;
1686 }
1687 EXPORT_SYMBOL(ath10k_ce_init_pipe);
1688
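/*
 * Rough call-order sketch (illustrative, not taken from this file): a bus
 * backend is expected to allocate pipes once and then init/deinit them
 * around target power transitions, e.g.
 *
 *	ret = ath10k_ce_alloc_pipe(ar, ce_id, attr);	/* at probe time *\/
 *	...
 *	ret = ath10k_ce_init_pipe(ar, ce_id, attr);	/* on power up *\/
 *	...
 *	ath10k_ce_deinit_pipe(ar, ce_id);		/* on power down *\/
 *	ath10k_ce_free_pipe(ar, ce_id);			/* at remove time *\/
 */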
1689 static void ath10k_ce_deinit_src_ring(struct ath10k *ar, unsigned int ce_id)
1690 {
1691 u32 ctrl_addr = ath10k_ce_base_address(ar, ce_id);
1692
1693 ath10k_ce_src_ring_base_addr_set(ar, ce_id, 0);
1694 ath10k_ce_src_ring_size_set(ar, ctrl_addr, 0);
1695 ath10k_ce_src_ring_dmax_set(ar, ctrl_addr, 0);
1696 ath10k_ce_src_ring_highmark_set(ar, ctrl_addr, 0);
1697 }
1698
1699 static void ath10k_ce_deinit_dest_ring(struct ath10k *ar, unsigned int ce_id)
1700 {
1701 u32 ctrl_addr = ath10k_ce_base_address(ar, ce_id);
1702
1703 ath10k_ce_dest_ring_base_addr_set(ar, ce_id, 0);
1704 ath10k_ce_dest_ring_size_set(ar, ctrl_addr, 0);
1705 ath10k_ce_dest_ring_highmark_set(ar, ctrl_addr, 0);
1706 }
1707
1708 void ath10k_ce_deinit_pipe(struct ath10k *ar, unsigned int ce_id)
1709 {
1710 ath10k_ce_deinit_src_ring(ar, ce_id);
1711 ath10k_ce_deinit_dest_ring(ar, ce_id);
1712 }
1713 EXPORT_SYMBOL(ath10k_ce_deinit_pipe);
1714
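/*
 * Release everything ath10k_ce_alloc_pipe() set up for a 32-bit descriptor
 * pipe: the optional shadow register copy, the coherent descriptor blocks of
 * both rings, and the ring structures themselves. The _64 variant below is
 * identical apart from sizing with struct ce_desc_64.
 */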
1715 static void _ath10k_ce_free_pipe(struct ath10k *ar, int ce_id)
1716 {
1717 struct ath10k_ce *ce = ath10k_ce_priv(ar);
1718 struct ath10k_ce_pipe *ce_state = &ce->ce_states[ce_id];
1719
1720 if (ce_state->src_ring) {
1721 if (ar->hw_params.shadow_reg_support)
1722 kfree(ce_state->src_ring->shadow_base_unaligned);
1723 dma_free_coherent(ar->dev,
1724 (ce_state->src_ring->nentries *
1725 sizeof(struct ce_desc) +
1726 CE_DESC_RING_ALIGN),
1727 ce_state->src_ring->base_addr_owner_space,
1728 ce_state->src_ring->base_addr_ce_space);
1729 kfree(ce_state->src_ring);
1730 }
1731
1732 if (ce_state->dest_ring) {
1733 dma_free_coherent(ar->dev,
1734 (ce_state->dest_ring->nentries *
1735 sizeof(struct ce_desc) +
1736 CE_DESC_RING_ALIGN),
1737 ce_state->dest_ring->base_addr_owner_space,
1738 ce_state->dest_ring->base_addr_ce_space);
1739 kfree(ce_state->dest_ring);
1740 }
1741
1742 ce_state->src_ring = NULL;
1743 ce_state->dest_ring = NULL;
1744 }
1745
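/* 64-bit descriptor counterpart of _ath10k_ce_free_pipe() above. */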
1746 static void _ath10k_ce_free_pipe_64(struct ath10k *ar, int ce_id)
1747 {
1748 struct ath10k_ce *ce = ath10k_ce_priv(ar);
1749 struct ath10k_ce_pipe *ce_state = &ce->ce_states[ce_id];
1750
1751 if (ce_state->src_ring) {
1752 if (ar->hw_params.shadow_reg_support)
1753 kfree(ce_state->src_ring->shadow_base_unaligned);
1754 dma_free_coherent(ar->dev,
1755 (ce_state->src_ring->nentries *
1756 sizeof(struct ce_desc_64) +
1757 CE_DESC_RING_ALIGN),
1758 ce_state->src_ring->base_addr_owner_space,
1759 ce_state->src_ring->base_addr_ce_space);
1760 kfree(ce_state->src_ring);
1761 }
1762
1763 if (ce_state->dest_ring) {
1764 dma_free_coherent(ar->dev,
1765 (ce_state->dest_ring->nentries *
1766 sizeof(struct ce_desc_64) +
1767 CE_DESC_RING_ALIGN),
1768 ce_state->dest_ring->base_addr_owner_space,
1769 ce_state->dest_ring->base_addr_ce_space);
1770 kfree(ce_state->dest_ring);
1771 }
1772
1773 ce_state->src_ring = NULL;
1774 ce_state->dest_ring = NULL;
1775 }
1776
1777 void ath10k_ce_free_pipe(struct ath10k *ar, int ce_id)
1778 {
1779 struct ath10k_ce *ce = ath10k_ce_priv(ar);
1780 struct ath10k_ce_pipe *ce_state = &ce->ce_states[ce_id];
1781
1782 ce_state->ops->ce_free_pipe(ar, ce_id);
1783 }
1784 EXPORT_SYMBOL(ath10k_ce_free_pipe);
1785
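/*
 * Dump the base address and the source/destination read/write indices of
 * every copy engine to the log and, when crash_data is provided, record
 * them for the firmware crash dump. Callers must hold ar->dump_mutex.
 */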
1786 void ath10k_ce_dump_registers(struct ath10k *ar,
1787 struct ath10k_fw_crash_data *crash_data)
1788 {
1789 struct ath10k_ce *ce = ath10k_ce_priv(ar);
1790 struct ath10k_ce_crash_data ce_data;
1791 u32 addr, id;
1792
1793 lockdep_assert_held(&ar->dump_mutex);
1794
1795 ath10k_err(ar, "Copy Engine register dump:\n");
1796
1797 spin_lock_bh(&ce->ce_lock);
1798 for (id = 0; id < CE_COUNT; id++) {
1799 addr = ath10k_ce_base_address(ar, id);
1800 ce_data.base_addr = cpu_to_le32(addr);
1801
1802 ce_data.src_wr_idx =
1803 cpu_to_le32(ath10k_ce_src_ring_write_index_get(ar, addr));
1804 ce_data.src_r_idx =
1805 cpu_to_le32(ath10k_ce_src_ring_read_index_get(ar, addr));
1806 ce_data.dst_wr_idx =
1807 cpu_to_le32(ath10k_ce_dest_ring_write_index_get(ar, addr));
1808 ce_data.dst_r_idx =
1809 cpu_to_le32(ath10k_ce_dest_ring_read_index_get(ar, addr));
1810
1811 if (crash_data)
1812 crash_data->ce_crash_data[id] = ce_data;
1813
1814 #if defined(__linux__)
1815 ath10k_err(ar, "[%02d]: 0x%08x %3u %3u %3u %3u", id,
1816 #elif defined(__FreeBSD__)
1817 ath10k_err(ar, "[%02d]: 0x%08x %3u %3u %3u %3u\n", id,
1818 #endif
1819 le32_to_cpu(ce_data.base_addr),
1820 le32_to_cpu(ce_data.src_wr_idx),
1821 le32_to_cpu(ce_data.src_r_idx),
1822 le32_to_cpu(ce_data.dst_wr_idx),
1823 le32_to_cpu(ce_data.dst_r_idx));
1824 }
1825
1826 spin_unlock_bh(&ce->ce_lock);
1827 }
1828 EXPORT_SYMBOL(ath10k_ce_dump_registers);
1829
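/*
 * Two ops tables back the per-pipe ce_state->ops pointer: ce_ops handles the
 * legacy 32-bit descriptor format, while ce_64_ops handles the 64-bit format
 * (including the high base-address registers) used by WCN3990. The choice is
 * made per hw_rev in ath10k_ce_set_ops() below.
 */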
1830 static const struct ath10k_ce_ops ce_ops = {
1831 .ce_alloc_src_ring = ath10k_ce_alloc_src_ring,
1832 .ce_alloc_dst_ring = ath10k_ce_alloc_dest_ring,
1833 .ce_rx_post_buf = __ath10k_ce_rx_post_buf,
1834 .ce_completed_recv_next_nolock = _ath10k_ce_completed_recv_next_nolock,
1835 .ce_revoke_recv_next = _ath10k_ce_revoke_recv_next,
1836 .ce_extract_desc_data = ath10k_ce_extract_desc_data,
1837 .ce_free_pipe = _ath10k_ce_free_pipe,
1838 .ce_send_nolock = _ath10k_ce_send_nolock,
1839 .ce_set_src_ring_base_addr_hi = NULL,
1840 .ce_set_dest_ring_base_addr_hi = NULL,
1841 .ce_completed_send_next_nolock = _ath10k_ce_completed_send_next_nolock,
1842 };
1843
1844 static const struct ath10k_ce_ops ce_64_ops = {
1845 .ce_alloc_src_ring = ath10k_ce_alloc_src_ring_64,
1846 .ce_alloc_dst_ring = ath10k_ce_alloc_dest_ring_64,
1847 .ce_rx_post_buf = __ath10k_ce_rx_post_buf_64,
1848 .ce_completed_recv_next_nolock =
1849 _ath10k_ce_completed_recv_next_nolock_64,
1850 .ce_revoke_recv_next = _ath10k_ce_revoke_recv_next_64,
1851 .ce_extract_desc_data = ath10k_ce_extract_desc_data_64,
1852 .ce_free_pipe = _ath10k_ce_free_pipe_64,
1853 .ce_send_nolock = _ath10k_ce_send_nolock_64,
1854 .ce_set_src_ring_base_addr_hi = ath10k_ce_set_src_ring_base_addr_hi,
1855 .ce_set_dest_ring_base_addr_hi = ath10k_ce_set_dest_ring_base_addr_hi,
1856 .ce_completed_send_next_nolock = _ath10k_ce_completed_send_next_nolock_64,
1857 };
1858
1859 static void ath10k_ce_set_ops(struct ath10k *ar,
1860 struct ath10k_ce_pipe *ce_state)
1861 {
1862 switch (ar->hw_rev) {
1863 case ATH10K_HW_WCN3990:
1864 ce_state->ops = &ce_64_ops;
1865 break;
1866 default:
1867 ce_state->ops = &ce_ops;
1868 break;
1869 }
1870 }
1871
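/*
 * Allocate a pipe: pick the descriptor-format ops for this hardware, sanity
 * check the HTT host-to-target ring sizing at build time, record the caller's
 * send/recv callbacks, and allocate whichever rings the attributes ask for.
 * Programming the rings into the hardware is left to ath10k_ce_init_pipe().
 */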
1872 int ath10k_ce_alloc_pipe(struct ath10k *ar, int ce_id,
1873 const struct ce_attr *attr)
1874 {
1875 struct ath10k_ce *ce = ath10k_ce_priv(ar);
1876 struct ath10k_ce_pipe *ce_state = &ce->ce_states[ce_id];
1877 int ret;
1878
1879 ath10k_ce_set_ops(ar, ce_state);
1880 	/* Make sure there are enough CE ring buffer entries for HTT TX to
1881 	 * avoid additional TX locking checks.
1882 	 *
1883 	 * For lack of a better place, do the check here.
1884 	 */
1885 BUILD_BUG_ON(2 * TARGET_NUM_MSDU_DESC >
1886 (CE_HTT_H2T_MSG_SRC_NENTRIES - 1));
1887 BUILD_BUG_ON(2 * TARGET_10_4_NUM_MSDU_DESC_PFC >
1888 (CE_HTT_H2T_MSG_SRC_NENTRIES - 1));
1889 BUILD_BUG_ON(2 * TARGET_TLV_NUM_MSDU_DESC >
1890 (CE_HTT_H2T_MSG_SRC_NENTRIES - 1));
1891
1892 ce_state->ar = ar;
1893 ce_state->id = ce_id;
1894 ce_state->ctrl_addr = ath10k_ce_base_address(ar, ce_id);
1895 ce_state->attr_flags = attr->flags;
1896 ce_state->src_sz_max = attr->src_sz_max;
1897
1898 if (attr->src_nentries)
1899 ce_state->send_cb = attr->send_cb;
1900
1901 if (attr->dest_nentries)
1902 ce_state->recv_cb = attr->recv_cb;
1903
1904 if (attr->src_nentries) {
1905 ce_state->src_ring =
1906 ce_state->ops->ce_alloc_src_ring(ar, ce_id, attr);
1907 if (IS_ERR(ce_state->src_ring)) {
1908 ret = PTR_ERR(ce_state->src_ring);
1909 ath10k_err(ar, "failed to alloc CE src ring %d: %d\n",
1910 ce_id, ret);
1911 ce_state->src_ring = NULL;
1912 return ret;
1913 }
1914 }
1915
1916 if (attr->dest_nentries) {
1917 ce_state->dest_ring = ce_state->ops->ce_alloc_dst_ring(ar,
1918 ce_id,
1919 attr);
1920 if (IS_ERR(ce_state->dest_ring)) {
1921 ret = PTR_ERR(ce_state->dest_ring);
1922 ath10k_err(ar, "failed to alloc CE dest ring %d: %d\n",
1923 ce_id, ret);
1924 ce_state->dest_ring = NULL;
1925 return ret;
1926 }
1927 }
1928
1929 return 0;
1930 }
1931 EXPORT_SYMBOL(ath10k_ce_alloc_pipe);
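/*
 * Illustrative caller-side attribute entry (the values and callback name are
 * made up for the example, but the fields follow struct ce_attr as used
 * above):
 *
 *	static const struct ce_attr example_attr = {
 *		.flags = CE_ATTR_FLAGS,
 *		.src_nentries = 16,
 *		.src_sz_max = 2048,
 *		.dest_nentries = 0,
 *		.send_cb = example_htc_send_done,
 *	};
 *
 *	ret = ath10k_ce_alloc_pipe(ar, 1, &example_attr);
 */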
1932
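/*
 * Allocate one u32 of coherent memory per copy engine, program its base
 * address into the RRI low/high registers, and set the update (upd) bit in
 * each CE's CTRL1 register. This lets ring read indices be fetched from host
 * DDR rather than via register reads on targets that support it.
 */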
1933 void ath10k_ce_alloc_rri(struct ath10k *ar)
1934 {
1935 int i;
1936 u32 value;
1937 u32 ctrl1_regs;
1938 u32 ce_base_addr;
1939 struct ath10k_ce *ce = ath10k_ce_priv(ar);
1940
1941 ce->vaddr_rri = dma_alloc_coherent(ar->dev,
1942 (CE_COUNT * sizeof(u32)),
1943 &ce->paddr_rri, GFP_KERNEL);
1944
1945 if (!ce->vaddr_rri)
1946 return;
1947
1948 ath10k_ce_write32(ar, ar->hw_ce_regs->ce_rri_low,
1949 lower_32_bits(ce->paddr_rri));
1950 ath10k_ce_write32(ar, ar->hw_ce_regs->ce_rri_high,
1951 (upper_32_bits(ce->paddr_rri) &
1952 CE_DESC_ADDR_HI_MASK));
1953
1954 for (i = 0; i < CE_COUNT; i++) {
1955 ctrl1_regs = ar->hw_ce_regs->ctrl1_regs->addr;
1956 ce_base_addr = ath10k_ce_base_address(ar, i);
1957 value = ath10k_ce_read32(ar, ce_base_addr + ctrl1_regs);
1958 value |= ar->hw_ce_regs->upd->mask;
1959 ath10k_ce_write32(ar, ce_base_addr + ctrl1_regs, value);
1960 }
1961 }
1962 EXPORT_SYMBOL(ath10k_ce_alloc_rri);
1963
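/* Free the read-index block allocated by ath10k_ce_alloc_rri(). */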
1964 void ath10k_ce_free_rri(struct ath10k *ar)
1965 {
1966 struct ath10k_ce *ce = ath10k_ce_priv(ar);
1967
1968 dma_free_coherent(ar->dev, (CE_COUNT * sizeof(u32)),
1969 ce->vaddr_rri,
1970 ce->paddr_rri);
1971 }
1972 EXPORT_SYMBOL(ath10k_ce_free_rri);
1973