1 // SPDX-License-Identifier: ISC
2 /*
3  * Copyright (c) 2005-2011 Atheros Communications Inc.
4  * Copyright (c) 2011-2017 Qualcomm Atheros, Inc.
5  * Copyright (c) 2018 The Linux Foundation. All rights reserved.
6  * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
7  * Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries.
8  */
9 
10 #include <linux/export.h>
11 #include "hif.h"
12 #include "ce.h"
13 #include "debug.h"
14 
15 /*
16  * Support for Copy Engine hardware, which is mainly used for
17  * communication between Host and Target over a PCIe interconnect.
18  */
19 
20 /*
21  * A single CopyEngine (CE) comprises two "rings":
22  *   a source ring
23  *   a destination ring
24  *
25  * Each ring consists of a number of descriptors which specify
26  * an address, length, and meta-data.
27  *
28  * Typically, one side of the PCIe/AHB/SNOC interconnect (Host or Target)
29  * controls one ring and the other side controls the other ring.
30  * The source side chooses when to initiate a transfer and it
31  * chooses what to send (buffer address, length). The destination
32  * side keeps a supply of "anonymous receive buffers" available and
33  * it handles incoming data as it arrives (when the destination
34  * receives an interrupt).
35  *
36  * The sender may send a simple buffer (address/length) or it may
37  * send a small list of buffers.  When a small list is sent, hardware
38  * "gathers" these and they end up in a single destination buffer
39  * with a single interrupt.
40  *
41  * There are several "contexts" managed by this layer -- more, it may
42  * seem, than should be needed. These are provided mainly for
43  * maximum flexibility and especially to facilitate a simpler HIF
44  * implementation. There are per-CopyEngine recv, send, and watermark
45  * contexts. These are supplied by the caller when a recv, send,
46  * or watermark handler is established and they are echoed back to
47  * the caller when the respective callbacks are invoked. There is
48  * also a per-transfer context supplied by the caller when a buffer
49  * (or sendlist) is sent and when a buffer is enqueued for recv.
50  * These per-transfer contexts are echoed back to the caller when
51  * the buffer is sent/received.
52  */
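/*
 * A rough usage sketch (illustrative only -- locking, error handling and
 * pipe/ring setup are omitted, and skb, paddr, nbytes, transfer_id,
 * consume_tx() and consume_rx() are placeholder names, not part of this
 * file):
 *
 *	// Destination side: keep anonymous receive buffers posted.
 *	ath10k_ce_rx_post_buf(rx_pipe, skb, paddr);
 *
 *	// Source side: queue a buffer; skb is the per-transfer context
 *	// that is echoed back on completion.
 *	ath10k_ce_send(tx_pipe, skb, paddr, nbytes, transfer_id, 0);
 *
 *	// Interrupt/poll path: reap completions on both rings.
 *	while (ath10k_ce_completed_send_next(tx_pipe, &ctx) == 0)
 *		consume_tx(ctx);
 *	while (ath10k_ce_completed_recv_next(rx_pipe, &ctx, &nbytes) == 0)
 *		consume_rx(ctx, nbytes);
 */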
53 
54 static inline u32 shadow_sr_wr_ind_addr(struct ath10k *ar,
55 					struct ath10k_ce_pipe *ce_state)
56 {
57 	u32 ce_id = ce_state->id;
58 	u32 addr = 0;
59 
60 	switch (ce_id) {
61 	case 0:
62 		addr = 0x00032000;
63 		break;
64 	case 3:
65 		addr = 0x0003200C;
66 		break;
67 	case 4:
68 		addr = 0x00032010;
69 		break;
70 	case 5:
71 		addr = 0x00032014;
72 		break;
73 	case 7:
74 		addr = 0x0003201C;
75 		break;
76 	default:
77 		ath10k_warn(ar, "invalid CE id: %d", ce_id);
78 		break;
79 	}
80 	return addr;
81 }
82 
83 static inline unsigned int
84 ath10k_set_ring_byte(unsigned int offset,
85 		     const struct ath10k_hw_ce_regs_addr_map *addr_map)
86 {
87 	return ((offset << addr_map->lsb) & addr_map->mask);
88 }
89 
90 static inline u32 ath10k_ce_read32(struct ath10k *ar, u32 offset)
91 {
92 	struct ath10k_ce *ce = ath10k_ce_priv(ar);
93 
94 	return ce->bus_ops->read32(ar, offset);
95 }
96 
97 static inline void ath10k_ce_write32(struct ath10k *ar, u32 offset, u32 value)
98 {
99 	struct ath10k_ce *ce = ath10k_ce_priv(ar);
100 
101 	ce->bus_ops->write32(ar, offset, value);
102 }
103 
104 static inline void ath10k_ce_dest_ring_write_index_set(struct ath10k *ar,
105 						       u32 ce_ctrl_addr,
106 						       unsigned int n)
107 {
108 	ath10k_ce_write32(ar, ce_ctrl_addr +
109 			  ar->hw_ce_regs->dst_wr_index_addr, n);
110 }
111 
112 static inline u32 ath10k_ce_dest_ring_write_index_get(struct ath10k *ar,
113 						      u32 ce_ctrl_addr)
114 {
115 	return ath10k_ce_read32(ar, ce_ctrl_addr +
116 				ar->hw_ce_regs->dst_wr_index_addr);
117 }
118 
119 static inline void ath10k_ce_src_ring_write_index_set(struct ath10k *ar,
120 						      u32 ce_ctrl_addr,
121 						      unsigned int n)
122 {
123 	ath10k_ce_write32(ar, ce_ctrl_addr +
124 			  ar->hw_ce_regs->sr_wr_index_addr, n);
125 }
126 
127 static inline u32 ath10k_ce_src_ring_write_index_get(struct ath10k *ar,
128 						     u32 ce_ctrl_addr)
129 {
130 	return ath10k_ce_read32(ar, ce_ctrl_addr +
131 				ar->hw_ce_regs->sr_wr_index_addr);
132 }
133 
134 static inline u32 ath10k_ce_src_ring_read_index_from_ddr(struct ath10k *ar,
135 							 u32 ce_id)
136 {
137 	struct ath10k_ce *ce = ath10k_ce_priv(ar);
138 
139 	return ce->vaddr_rri[ce_id] & CE_DDR_RRI_MASK;
140 }
141 
142 static inline u32 ath10k_ce_src_ring_read_index_get(struct ath10k *ar,
143 						    u32 ce_ctrl_addr)
144 {
145 	struct ath10k_ce *ce = ath10k_ce_priv(ar);
146 	u32 ce_id = COPY_ENGINE_ID(ce_ctrl_addr);
147 	struct ath10k_ce_pipe *ce_state = &ce->ce_states[ce_id];
148 	u32 index;
149 
150 	if (ar->hw_params.rri_on_ddr &&
151 	    (ce_state->attr_flags & CE_ATTR_DIS_INTR))
152 		index = ath10k_ce_src_ring_read_index_from_ddr(ar, ce_id);
153 	else
154 		index = ath10k_ce_read32(ar, ce_ctrl_addr +
155 					 ar->hw_ce_regs->current_srri_addr);
156 
157 	return index;
158 }
159 
160 static inline void
161 ath10k_ce_shadow_src_ring_write_index_set(struct ath10k *ar,
162 					  struct ath10k_ce_pipe *ce_state,
163 					  unsigned int value)
164 {
165 	ath10k_ce_write32(ar, shadow_sr_wr_ind_addr(ar, ce_state), value);
166 }
167 
168 static inline void ath10k_ce_src_ring_base_addr_set(struct ath10k *ar,
169 						    u32 ce_id,
170 						    u64 addr)
171 {
172 	struct ath10k_ce *ce = ath10k_ce_priv(ar);
173 	struct ath10k_ce_pipe *ce_state = &ce->ce_states[ce_id];
174 	u32 ce_ctrl_addr = ath10k_ce_base_address(ar, ce_id);
175 	u32 addr_lo = lower_32_bits(addr);
176 
177 	ath10k_ce_write32(ar, ce_ctrl_addr +
178 			  ar->hw_ce_regs->sr_base_addr_lo, addr_lo);
179 
180 	if (ce_state->ops->ce_set_src_ring_base_addr_hi) {
181 		ce_state->ops->ce_set_src_ring_base_addr_hi(ar, ce_ctrl_addr,
182 							    addr);
183 	}
184 }
185 
186 static void ath10k_ce_set_src_ring_base_addr_hi(struct ath10k *ar,
187 						u32 ce_ctrl_addr,
188 						u64 addr)
189 {
190 	u32 addr_hi = upper_32_bits(addr) & CE_DESC_ADDR_HI_MASK;
191 
192 	ath10k_ce_write32(ar, ce_ctrl_addr +
193 			  ar->hw_ce_regs->sr_base_addr_hi, addr_hi);
194 }
195 
196 static inline void ath10k_ce_src_ring_size_set(struct ath10k *ar,
197 					       u32 ce_ctrl_addr,
198 					       unsigned int n)
199 {
200 	ath10k_ce_write32(ar, ce_ctrl_addr +
201 			  ar->hw_ce_regs->sr_size_addr, n);
202 }
203 
204 static inline void ath10k_ce_src_ring_dmax_set(struct ath10k *ar,
205 					       u32 ce_ctrl_addr,
206 					       unsigned int n)
207 {
208 	const struct ath10k_hw_ce_ctrl1 *ctrl_regs = ar->hw_ce_regs->ctrl1_regs;
209 
210 	u32 ctrl1_addr = ath10k_ce_read32(ar, ce_ctrl_addr +
211 					  ctrl_regs->addr);
212 
213 	ath10k_ce_write32(ar, ce_ctrl_addr + ctrl_regs->addr,
214 			  (ctrl1_addr &  ~(ctrl_regs->dmax->mask)) |
215 			  ath10k_set_ring_byte(n, ctrl_regs->dmax));
216 }
217 
218 static inline void ath10k_ce_src_ring_byte_swap_set(struct ath10k *ar,
219 						    u32 ce_ctrl_addr,
220 						    unsigned int n)
221 {
222 	const struct ath10k_hw_ce_ctrl1 *ctrl_regs = ar->hw_ce_regs->ctrl1_regs;
223 
224 	u32 ctrl1_addr = ath10k_ce_read32(ar, ce_ctrl_addr +
225 					  ctrl_regs->addr);
226 
227 	ath10k_ce_write32(ar, ce_ctrl_addr + ctrl_regs->addr,
228 			  (ctrl1_addr & ~(ctrl_regs->src_ring->mask)) |
229 			  ath10k_set_ring_byte(n, ctrl_regs->src_ring));
230 }
231 
232 static inline void ath10k_ce_dest_ring_byte_swap_set(struct ath10k *ar,
233 						     u32 ce_ctrl_addr,
234 						     unsigned int n)
235 {
236 	const struct ath10k_hw_ce_ctrl1 *ctrl_regs = ar->hw_ce_regs->ctrl1_regs;
237 
238 	u32 ctrl1_addr = ath10k_ce_read32(ar, ce_ctrl_addr +
239 					  ctrl_regs->addr);
240 
241 	ath10k_ce_write32(ar, ce_ctrl_addr + ctrl_regs->addr,
242 			  (ctrl1_addr & ~(ctrl_regs->dst_ring->mask)) |
243 			  ath10k_set_ring_byte(n, ctrl_regs->dst_ring));
244 }
245 
246 static inline
247 	u32 ath10k_ce_dest_ring_read_index_from_ddr(struct ath10k *ar, u32 ce_id)
248 {
249 	struct ath10k_ce *ce = ath10k_ce_priv(ar);
250 
251 	return (ce->vaddr_rri[ce_id] >> CE_DDR_DRRI_SHIFT) &
252 		CE_DDR_RRI_MASK;
253 }
254 
255 static inline u32 ath10k_ce_dest_ring_read_index_get(struct ath10k *ar,
256 						     u32 ce_ctrl_addr)
257 {
258 	struct ath10k_ce *ce = ath10k_ce_priv(ar);
259 	u32 ce_id = COPY_ENGINE_ID(ce_ctrl_addr);
260 	struct ath10k_ce_pipe *ce_state = &ce->ce_states[ce_id];
261 	u32 index;
262 
263 	if (ar->hw_params.rri_on_ddr &&
264 	    (ce_state->attr_flags & CE_ATTR_DIS_INTR))
265 		index = ath10k_ce_dest_ring_read_index_from_ddr(ar, ce_id);
266 	else
267 		index = ath10k_ce_read32(ar, ce_ctrl_addr +
268 					 ar->hw_ce_regs->current_drri_addr);
269 
270 	return index;
271 }
272 
273 static inline void ath10k_ce_dest_ring_base_addr_set(struct ath10k *ar,
274 						     u32 ce_id,
275 						     u64 addr)
276 {
277 	struct ath10k_ce *ce = ath10k_ce_priv(ar);
278 	struct ath10k_ce_pipe *ce_state = &ce->ce_states[ce_id];
279 	u32 ce_ctrl_addr = ath10k_ce_base_address(ar, ce_id);
280 	u32 addr_lo = lower_32_bits(addr);
281 
282 	ath10k_ce_write32(ar, ce_ctrl_addr +
283 			  ar->hw_ce_regs->dr_base_addr_lo, addr_lo);
284 
285 	if (ce_state->ops->ce_set_dest_ring_base_addr_hi) {
286 		ce_state->ops->ce_set_dest_ring_base_addr_hi(ar, ce_ctrl_addr,
287 							     addr);
288 	}
289 }
290 
291 static void ath10k_ce_set_dest_ring_base_addr_hi(struct ath10k *ar,
292 						 u32 ce_ctrl_addr,
293 						 u64 addr)
294 {
295 	u32 addr_hi = upper_32_bits(addr) & CE_DESC_ADDR_HI_MASK;
296 	u32 reg_value;
297 
298 	reg_value = ath10k_ce_read32(ar, ce_ctrl_addr +
299 				     ar->hw_ce_regs->dr_base_addr_hi);
300 	reg_value &= ~CE_DESC_ADDR_HI_MASK;
301 	reg_value |= addr_hi;
302 	ath10k_ce_write32(ar, ce_ctrl_addr +
303 			  ar->hw_ce_regs->dr_base_addr_hi, reg_value);
304 }
305 
306 static inline void ath10k_ce_dest_ring_size_set(struct ath10k *ar,
307 						u32 ce_ctrl_addr,
308 						unsigned int n)
309 {
310 	ath10k_ce_write32(ar, ce_ctrl_addr +
311 			  ar->hw_ce_regs->dr_size_addr, n);
312 }
313 
314 static inline void ath10k_ce_src_ring_highmark_set(struct ath10k *ar,
315 						   u32 ce_ctrl_addr,
316 						   unsigned int n)
317 {
318 	const struct ath10k_hw_ce_dst_src_wm_regs *srcr_wm = ar->hw_ce_regs->wm_srcr;
319 	u32 addr = ath10k_ce_read32(ar, ce_ctrl_addr + srcr_wm->addr);
320 
321 	ath10k_ce_write32(ar, ce_ctrl_addr + srcr_wm->addr,
322 			  (addr & ~(srcr_wm->wm_high->mask)) |
323 			  (ath10k_set_ring_byte(n, srcr_wm->wm_high)));
324 }
325 
326 static inline void ath10k_ce_src_ring_lowmark_set(struct ath10k *ar,
327 						  u32 ce_ctrl_addr,
328 						  unsigned int n)
329 {
330 	const struct ath10k_hw_ce_dst_src_wm_regs *srcr_wm = ar->hw_ce_regs->wm_srcr;
331 	u32 addr = ath10k_ce_read32(ar, ce_ctrl_addr + srcr_wm->addr);
332 
333 	ath10k_ce_write32(ar, ce_ctrl_addr + srcr_wm->addr,
334 			  (addr & ~(srcr_wm->wm_low->mask)) |
335 			  (ath10k_set_ring_byte(n, srcr_wm->wm_low)));
336 }
337 
338 static inline void ath10k_ce_dest_ring_highmark_set(struct ath10k *ar,
339 						    u32 ce_ctrl_addr,
340 						    unsigned int n)
341 {
342 	const struct ath10k_hw_ce_dst_src_wm_regs *dstr_wm = ar->hw_ce_regs->wm_dstr;
343 	u32 addr = ath10k_ce_read32(ar, ce_ctrl_addr + dstr_wm->addr);
344 
345 	ath10k_ce_write32(ar, ce_ctrl_addr + dstr_wm->addr,
346 			  (addr & ~(dstr_wm->wm_high->mask)) |
347 			  (ath10k_set_ring_byte(n, dstr_wm->wm_high)));
348 }
349 
350 static inline void ath10k_ce_dest_ring_lowmark_set(struct ath10k *ar,
351 						   u32 ce_ctrl_addr,
352 						   unsigned int n)
353 {
354 	const struct ath10k_hw_ce_dst_src_wm_regs *dstr_wm = ar->hw_ce_regs->wm_dstr;
355 	u32 addr = ath10k_ce_read32(ar, ce_ctrl_addr + dstr_wm->addr);
356 
357 	ath10k_ce_write32(ar, ce_ctrl_addr + dstr_wm->addr,
358 			  (addr & ~(dstr_wm->wm_low->mask)) |
359 			  (ath10k_set_ring_byte(n, dstr_wm->wm_low)));
360 }
361 
362 static inline void ath10k_ce_copy_complete_inter_enable(struct ath10k *ar,
363 							u32 ce_ctrl_addr)
364 {
365 	const struct ath10k_hw_ce_host_ie *host_ie = ar->hw_ce_regs->host_ie;
366 
367 	u32 host_ie_addr = ath10k_ce_read32(ar, ce_ctrl_addr +
368 					    ar->hw_ce_regs->host_ie_addr);
369 
370 	ath10k_ce_write32(ar, ce_ctrl_addr + ar->hw_ce_regs->host_ie_addr,
371 			  host_ie_addr | host_ie->copy_complete->mask);
372 }
373 
374 static inline void ath10k_ce_copy_complete_intr_disable(struct ath10k *ar,
375 							u32 ce_ctrl_addr)
376 {
377 	const struct ath10k_hw_ce_host_ie *host_ie = ar->hw_ce_regs->host_ie;
378 
379 	u32 host_ie_addr = ath10k_ce_read32(ar, ce_ctrl_addr +
380 					    ar->hw_ce_regs->host_ie_addr);
381 
382 	ath10k_ce_write32(ar, ce_ctrl_addr + ar->hw_ce_regs->host_ie_addr,
383 			  host_ie_addr & ~(host_ie->copy_complete->mask));
384 }
385 
386 static inline void ath10k_ce_watermark_intr_disable(struct ath10k *ar,
387 						    u32 ce_ctrl_addr)
388 {
389 	const struct ath10k_hw_ce_host_wm_regs *wm_regs = ar->hw_ce_regs->wm_regs;
390 
391 	u32 host_ie_addr = ath10k_ce_read32(ar, ce_ctrl_addr +
392 					    ar->hw_ce_regs->host_ie_addr);
393 
394 	ath10k_ce_write32(ar, ce_ctrl_addr + ar->hw_ce_regs->host_ie_addr,
395 			  host_ie_addr & ~(wm_regs->wm_mask));
396 }
397 
398 static inline void ath10k_ce_error_intr_disable(struct ath10k *ar,
399 						u32 ce_ctrl_addr)
400 {
401 	const struct ath10k_hw_ce_misc_regs *misc_regs = ar->hw_ce_regs->misc_regs;
402 
403 	u32 misc_ie_addr = ath10k_ce_read32(ar,
404 			ce_ctrl_addr + ar->hw_ce_regs->misc_ie_addr);
405 
406 	ath10k_ce_write32(ar,
407 			  ce_ctrl_addr + ar->hw_ce_regs->misc_ie_addr,
408 			  misc_ie_addr & ~(misc_regs->err_mask));
409 }
410 
411 static inline void ath10k_ce_engine_int_status_clear(struct ath10k *ar,
412 						     u32 ce_ctrl_addr,
413 						     unsigned int mask)
414 {
415 	const struct ath10k_hw_ce_host_wm_regs *wm_regs = ar->hw_ce_regs->wm_regs;
416 
417 	ath10k_ce_write32(ar, ce_ctrl_addr + wm_regs->addr, mask);
418 }
419 
420 /*
421  * Guts of ath10k_ce_send.
422  * The caller takes responsibility for any needed locking.
423  */
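/*
 * Ring-space arithmetic, with illustrative numbers only: the ring is full
 * when the distance from write_index to sw_index - 1 (taken modulo the
 * ring size) drops to zero.  For an 8-entry ring (nentries_mask == 0x7)
 * with sw_index == 2 and write_index == 1, the free count is
 * (2 - 1 - 1) & 0x7 == 0, so the send fails with -ENOSR.
 */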
424 static int _ath10k_ce_send_nolock(struct ath10k_ce_pipe *ce_state,
425 				  void *per_transfer_context,
426 				  dma_addr_t buffer,
427 				  unsigned int nbytes,
428 				  unsigned int transfer_id,
429 				  unsigned int flags)
430 {
431 	struct ath10k *ar = ce_state->ar;
432 	struct ath10k_ce_ring *src_ring = ce_state->src_ring;
433 	struct ce_desc *desc, sdesc;
434 	unsigned int nentries_mask = src_ring->nentries_mask;
435 	unsigned int sw_index = src_ring->sw_index;
436 	unsigned int write_index = src_ring->write_index;
437 	u32 ctrl_addr = ce_state->ctrl_addr;
438 	u32 desc_flags = 0;
439 	int ret = 0;
440 
441 	if (nbytes > ce_state->src_sz_max)
442 		ath10k_warn(ar, "%s: sending more than we can (nbytes: %d, max: %d)\n",
443 			    __func__, nbytes, ce_state->src_sz_max);
444 
445 	if (unlikely(CE_RING_DELTA(nentries_mask,
446 				   write_index, sw_index - 1) <= 0)) {
447 		ret = -ENOSR;
448 		goto exit;
449 	}
450 
451 	desc = CE_SRC_RING_TO_DESC(src_ring->base_addr_owner_space,
452 				   write_index);
453 
454 	desc_flags |= SM(transfer_id, CE_DESC_FLAGS_META_DATA);
455 
456 	if (flags & CE_SEND_FLAG_GATHER)
457 		desc_flags |= CE_DESC_FLAGS_GATHER;
458 	if (flags & CE_SEND_FLAG_BYTE_SWAP)
459 		desc_flags |= CE_DESC_FLAGS_BYTE_SWAP;
460 
461 	sdesc.addr   = __cpu_to_le32(buffer);
462 	sdesc.nbytes = __cpu_to_le16(nbytes);
463 	sdesc.flags  = __cpu_to_le16(desc_flags);
464 
465 	*desc = sdesc;
466 
467 	src_ring->per_transfer_context[write_index] = per_transfer_context;
468 
469 	/* Update Source Ring Write Index */
470 	write_index = CE_RING_IDX_INCR(nentries_mask, write_index);
471 
472 	/* WORKAROUND */
473 	if (!(flags & CE_SEND_FLAG_GATHER))
474 		ath10k_ce_src_ring_write_index_set(ar, ctrl_addr, write_index);
475 
476 	src_ring->write_index = write_index;
477 exit:
478 	return ret;
479 }
480 
481 static int _ath10k_ce_send_nolock_64(struct ath10k_ce_pipe *ce_state,
482 				     void *per_transfer_context,
483 				     dma_addr_t buffer,
484 				     unsigned int nbytes,
485 				     unsigned int transfer_id,
486 				     unsigned int flags)
487 {
488 	struct ath10k *ar = ce_state->ar;
489 	struct ath10k_ce_ring *src_ring = ce_state->src_ring;
490 	struct ce_desc_64 *desc, sdesc;
491 	unsigned int nentries_mask = src_ring->nentries_mask;
492 	unsigned int sw_index;
493 	unsigned int write_index = src_ring->write_index;
494 	u32 ctrl_addr = ce_state->ctrl_addr;
495 	__le32 *addr;
496 	u32 desc_flags = 0;
497 	int ret = 0;
498 
499 	if (test_bit(ATH10K_FLAG_CRASH_FLUSH, &ar->dev_flags))
500 		return -ESHUTDOWN;
501 
502 	if (nbytes > ce_state->src_sz_max)
503 		ath10k_warn(ar, "%s: sending more than we can (nbytes: %d, max: %d)\n",
504 			    __func__, nbytes, ce_state->src_sz_max);
505 
506 	if (ar->hw_params.rri_on_ddr)
507 		sw_index = ath10k_ce_src_ring_read_index_from_ddr(ar, ce_state->id);
508 	else
509 		sw_index = src_ring->sw_index;
510 
511 	if (unlikely(CE_RING_DELTA(nentries_mask,
512 				   write_index, sw_index - 1) <= 0)) {
513 		ret = -ENOSR;
514 		goto exit;
515 	}
516 
517 	desc = CE_SRC_RING_TO_DESC_64(src_ring->base_addr_owner_space,
518 				      write_index);
519 
520 	desc_flags |= SM(transfer_id, CE_DESC_FLAGS_META_DATA);
521 
522 	if (flags & CE_SEND_FLAG_GATHER)
523 		desc_flags |= CE_DESC_FLAGS_GATHER;
524 
525 	if (flags & CE_SEND_FLAG_BYTE_SWAP)
526 		desc_flags |= CE_DESC_FLAGS_BYTE_SWAP;
527 
528 	addr = (__le32 *)&sdesc.addr;
529 
530 	flags |= upper_32_bits(buffer) & CE_DESC_ADDR_HI_MASK;
531 	addr[0] = __cpu_to_le32(buffer);
532 	addr[1] = __cpu_to_le32(flags);
533 	if (flags & CE_SEND_FLAG_GATHER)
534 		addr[1] |= __cpu_to_le32(CE_WCN3990_DESC_FLAGS_GATHER);
535 	else
536 		addr[1] &= ~(__cpu_to_le32(CE_WCN3990_DESC_FLAGS_GATHER));
537 
538 	sdesc.nbytes = __cpu_to_le16(nbytes);
539 	sdesc.flags  = __cpu_to_le16(desc_flags);
540 
541 	*desc = sdesc;
542 
543 	src_ring->per_transfer_context[write_index] = per_transfer_context;
544 
545 	/* Update Source Ring Write Index */
546 	write_index = CE_RING_IDX_INCR(nentries_mask, write_index);
547 
548 	if (!(flags & CE_SEND_FLAG_GATHER)) {
549 		if (ar->hw_params.shadow_reg_support)
550 			ath10k_ce_shadow_src_ring_write_index_set(ar, ce_state,
551 								  write_index);
552 		else
553 			ath10k_ce_src_ring_write_index_set(ar, ctrl_addr,
554 							   write_index);
555 	}
556 
557 	src_ring->write_index = write_index;
558 exit:
559 	return ret;
560 }
561 
562 int ath10k_ce_send_nolock(struct ath10k_ce_pipe *ce_state,
563 			  void *per_transfer_context,
564 			  dma_addr_t buffer,
565 			  unsigned int nbytes,
566 			  unsigned int transfer_id,
567 			  unsigned int flags)
568 {
569 	return ce_state->ops->ce_send_nolock(ce_state, per_transfer_context,
570 				    buffer, nbytes, transfer_id, flags);
571 }
572 EXPORT_SYMBOL(ath10k_ce_send_nolock);
573 
574 void __ath10k_ce_send_revert(struct ath10k_ce_pipe *pipe)
575 {
576 	struct ath10k *ar = pipe->ar;
577 	struct ath10k_ce *ce = ath10k_ce_priv(ar);
578 	struct ath10k_ce_ring *src_ring = pipe->src_ring;
579 	u32 ctrl_addr = pipe->ctrl_addr;
580 
581 	lockdep_assert_held(&ce->ce_lock);
582 
583 	/*
584 	 * This function must be called only if there is an incomplete
585 	 * scatter-gather transfer (before index register is updated)
586 	 * that needs to be cleaned up.
587 	 */
588 	if (WARN_ON_ONCE(src_ring->write_index == src_ring->sw_index))
589 		return;
590 
591 	if (WARN_ON_ONCE(src_ring->write_index ==
592 			 ath10k_ce_src_ring_write_index_get(ar, ctrl_addr)))
593 		return;
594 
595 	src_ring->write_index--;
596 	src_ring->write_index &= src_ring->nentries_mask;
597 
598 	src_ring->per_transfer_context[src_ring->write_index] = NULL;
599 }
600 EXPORT_SYMBOL(__ath10k_ce_send_revert);
601 
602 int ath10k_ce_send(struct ath10k_ce_pipe *ce_state,
603 		   void *per_transfer_context,
604 		   dma_addr_t buffer,
605 		   unsigned int nbytes,
606 		   unsigned int transfer_id,
607 		   unsigned int flags)
608 {
609 	struct ath10k *ar = ce_state->ar;
610 	struct ath10k_ce *ce = ath10k_ce_priv(ar);
611 	int ret;
612 
613 	spin_lock_bh(&ce->ce_lock);
614 	ret = ath10k_ce_send_nolock(ce_state, per_transfer_context,
615 				    buffer, nbytes, transfer_id, flags);
616 	spin_unlock_bh(&ce->ce_lock);
617 
618 	return ret;
619 }
620 EXPORT_SYMBOL(ath10k_ce_send);
621 
622 int ath10k_ce_num_free_src_entries(struct ath10k_ce_pipe *pipe)
623 {
624 	struct ath10k *ar = pipe->ar;
625 	struct ath10k_ce *ce = ath10k_ce_priv(ar);
626 	int delta;
627 
628 	spin_lock_bh(&ce->ce_lock);
629 	delta = CE_RING_DELTA(pipe->src_ring->nentries_mask,
630 			      pipe->src_ring->write_index,
631 			      pipe->src_ring->sw_index - 1);
632 	spin_unlock_bh(&ce->ce_lock);
633 
634 	return delta;
635 }
636 EXPORT_SYMBOL(ath10k_ce_num_free_src_entries);
637 
638 int __ath10k_ce_rx_num_free_bufs(struct ath10k_ce_pipe *pipe)
639 {
640 	struct ath10k *ar = pipe->ar;
641 	struct ath10k_ce *ce = ath10k_ce_priv(ar);
642 	struct ath10k_ce_ring *dest_ring = pipe->dest_ring;
643 	unsigned int nentries_mask = dest_ring->nentries_mask;
644 	unsigned int write_index = dest_ring->write_index;
645 	unsigned int sw_index = dest_ring->sw_index;
646 
647 	lockdep_assert_held(&ce->ce_lock);
648 
649 	return CE_RING_DELTA(nentries_mask, write_index, sw_index - 1);
650 }
651 EXPORT_SYMBOL(__ath10k_ce_rx_num_free_bufs);
652 
653 static int __ath10k_ce_rx_post_buf(struct ath10k_ce_pipe *pipe, void *ctx,
654 				   dma_addr_t paddr)
655 {
656 	struct ath10k *ar = pipe->ar;
657 	struct ath10k_ce *ce = ath10k_ce_priv(ar);
658 	struct ath10k_ce_ring *dest_ring = pipe->dest_ring;
659 	unsigned int nentries_mask = dest_ring->nentries_mask;
660 	unsigned int write_index = dest_ring->write_index;
661 	unsigned int sw_index = dest_ring->sw_index;
662 	struct ce_desc *base = dest_ring->base_addr_owner_space;
663 	struct ce_desc *desc = CE_DEST_RING_TO_DESC(base, write_index);
664 	u32 ctrl_addr = pipe->ctrl_addr;
665 
666 	lockdep_assert_held(&ce->ce_lock);
667 
668 	if ((pipe->id != 5) &&
669 	    CE_RING_DELTA(nentries_mask, write_index, sw_index - 1) == 0)
670 		return -ENOSPC;
671 
672 	desc->addr = __cpu_to_le32(paddr);
673 	desc->nbytes = 0;
674 
675 	dest_ring->per_transfer_context[write_index] = ctx;
676 	write_index = CE_RING_IDX_INCR(nentries_mask, write_index);
677 	ath10k_ce_dest_ring_write_index_set(ar, ctrl_addr, write_index);
678 	dest_ring->write_index = write_index;
679 
680 	return 0;
681 }
682 
683 static int __ath10k_ce_rx_post_buf_64(struct ath10k_ce_pipe *pipe,
684 				      void *ctx,
685 				      dma_addr_t paddr)
686 {
687 	struct ath10k *ar = pipe->ar;
688 	struct ath10k_ce *ce = ath10k_ce_priv(ar);
689 	struct ath10k_ce_ring *dest_ring = pipe->dest_ring;
690 	unsigned int nentries_mask = dest_ring->nentries_mask;
691 	unsigned int write_index = dest_ring->write_index;
692 	unsigned int sw_index = dest_ring->sw_index;
693 	struct ce_desc_64 *base = dest_ring->base_addr_owner_space;
694 	struct ce_desc_64 *desc =
695 			CE_DEST_RING_TO_DESC_64(base, write_index);
696 	u32 ctrl_addr = pipe->ctrl_addr;
697 
698 	lockdep_assert_held(&ce->ce_lock);
699 
700 	if (CE_RING_DELTA(nentries_mask, write_index, sw_index - 1) == 0)
701 		return -ENOSPC;
702 
703 	desc->addr = __cpu_to_le64(paddr);
704 	desc->addr &= __cpu_to_le64(CE_DESC_ADDR_MASK);
705 
706 	desc->nbytes = 0;
707 
708 	dest_ring->per_transfer_context[write_index] = ctx;
709 	write_index = CE_RING_IDX_INCR(nentries_mask, write_index);
710 	ath10k_ce_dest_ring_write_index_set(ar, ctrl_addr, write_index);
711 	dest_ring->write_index = write_index;
712 
713 	return 0;
714 }
715 
716 void ath10k_ce_rx_update_write_idx(struct ath10k_ce_pipe *pipe, u32 nentries)
717 {
718 	struct ath10k *ar = pipe->ar;
719 	struct ath10k_ce_ring *dest_ring = pipe->dest_ring;
720 	unsigned int nentries_mask = dest_ring->nentries_mask;
721 	unsigned int write_index = dest_ring->write_index;
722 	u32 ctrl_addr = pipe->ctrl_addr;
723 	u32 cur_write_idx = ath10k_ce_dest_ring_write_index_get(ar, ctrl_addr);
724 
725 	/* Prevent the CE ring from getting stuck when it is full.
726 	 * Make sure the write index stays 1 less than the read index.
727 	 */
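	/* For example (illustrative numbers): with an 8-entry ring
	 * (nentries_mask == 0x7), sw_index == 4 and cur_write_idx == 0,
	 * posting 4 entries would land the write index exactly on
	 * sw_index, so only 3 entries are exposed to the hardware here.
	 */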
728 	if (((cur_write_idx + nentries) & nentries_mask) == dest_ring->sw_index)
729 		nentries -= 1;
730 
731 	write_index = CE_RING_IDX_ADD(nentries_mask, write_index, nentries);
732 	ath10k_ce_dest_ring_write_index_set(ar, ctrl_addr, write_index);
733 	dest_ring->write_index = write_index;
734 }
735 EXPORT_SYMBOL(ath10k_ce_rx_update_write_idx);
736 
737 int ath10k_ce_rx_post_buf(struct ath10k_ce_pipe *pipe, void *ctx,
738 			  dma_addr_t paddr)
739 {
740 	struct ath10k *ar = pipe->ar;
741 	struct ath10k_ce *ce = ath10k_ce_priv(ar);
742 	int ret;
743 
744 	spin_lock_bh(&ce->ce_lock);
745 	ret = pipe->ops->ce_rx_post_buf(pipe, ctx, paddr);
746 	spin_unlock_bh(&ce->ce_lock);
747 
748 	return ret;
749 }
750 EXPORT_SYMBOL(ath10k_ce_rx_post_buf);
751 
752 /*
753  * Guts of ath10k_ce_completed_recv_next.
754  * The caller takes responsibility for any necessary locking.
755  */
756 static int
757 	 _ath10k_ce_completed_recv_next_nolock(struct ath10k_ce_pipe *ce_state,
758 					       void **per_transfer_contextp,
759 					       unsigned int *nbytesp)
760 {
761 	struct ath10k_ce_ring *dest_ring = ce_state->dest_ring;
762 	unsigned int nentries_mask = dest_ring->nentries_mask;
763 	unsigned int sw_index = dest_ring->sw_index;
764 
765 	struct ce_desc *base = dest_ring->base_addr_owner_space;
766 	struct ce_desc *desc = CE_DEST_RING_TO_DESC(base, sw_index);
767 	struct ce_desc sdesc;
768 	u16 nbytes;
769 
770 	/* Copy in one go for performance reasons */
771 	sdesc = *desc;
772 
773 	nbytes = __le16_to_cpu(sdesc.nbytes);
774 	if (nbytes == 0) {
775 		/*
776 		 * This closes a relatively unusual race where the Host
777 		 * sees the updated DRRI before the update to the
778 		 * corresponding descriptor has completed. We treat this
779 		 * as a descriptor that is not yet done.
780 		 */
781 		return -EIO;
782 	}
783 
784 	desc->nbytes = 0;
785 
786 	/* Return data from completed destination descriptor */
787 	*nbytesp = nbytes;
788 
789 	if (per_transfer_contextp)
790 		*per_transfer_contextp =
791 			dest_ring->per_transfer_context[sw_index];
792 
793 	/* Copy engine 5 (HTT Rx) will reuse the same transfer context.
794 	 * So update the transfer context for all CEs except CE5.
795 	 */
796 	if (ce_state->id != 5)
797 		dest_ring->per_transfer_context[sw_index] = NULL;
798 
799 	/* Update sw_index */
800 	sw_index = CE_RING_IDX_INCR(nentries_mask, sw_index);
801 	dest_ring->sw_index = sw_index;
802 
803 	return 0;
804 }
805 
806 static int
807 _ath10k_ce_completed_recv_next_nolock_64(struct ath10k_ce_pipe *ce_state,
808 					 void **per_transfer_contextp,
809 					 unsigned int *nbytesp)
810 {
811 	struct ath10k_ce_ring *dest_ring = ce_state->dest_ring;
812 	unsigned int nentries_mask = dest_ring->nentries_mask;
813 	unsigned int sw_index = dest_ring->sw_index;
814 	struct ce_desc_64 *base = dest_ring->base_addr_owner_space;
815 	struct ce_desc_64 *desc =
816 		CE_DEST_RING_TO_DESC_64(base, sw_index);
817 	struct ce_desc_64 sdesc;
818 	u16 nbytes;
819 
820 	/* Copy in one go for performance reasons */
821 	sdesc = *desc;
822 
823 	nbytes = __le16_to_cpu(sdesc.nbytes);
824 	if (nbytes == 0) {
825 		/* This closes a relatively unusual race where the Host
826 		 * sees the updated DRRI before the update to the
827 		 * corresponding descriptor has completed. We treat this
828 		 * as a descriptor that is not yet done.
829 		 */
830 		return -EIO;
831 	}
832 
833 	desc->nbytes = 0;
834 
835 	/* Return data from completed destination descriptor */
836 	*nbytesp = nbytes;
837 
838 	if (per_transfer_contextp)
839 		*per_transfer_contextp =
840 			dest_ring->per_transfer_context[sw_index];
841 
842 	/* Copy engine 5 (HTT Rx) will reuse the same transfer context.
843 	 * So update the transfer context for all CEs except CE5.
844 	 */
845 	if (ce_state->id != 5)
846 		dest_ring->per_transfer_context[sw_index] = NULL;
847 
848 	/* Update sw_index */
849 	sw_index = CE_RING_IDX_INCR(nentries_mask, sw_index);
850 	dest_ring->sw_index = sw_index;
851 
852 	return 0;
853 }
854 
855 int ath10k_ce_completed_recv_next_nolock(struct ath10k_ce_pipe *ce_state,
856 					 void **per_transfer_ctx,
857 					 unsigned int *nbytesp)
858 {
859 	return ce_state->ops->ce_completed_recv_next_nolock(ce_state,
860 							    per_transfer_ctx,
861 							    nbytesp);
862 }
863 EXPORT_SYMBOL(ath10k_ce_completed_recv_next_nolock);
864 
865 int ath10k_ce_completed_recv_next(struct ath10k_ce_pipe *ce_state,
866 				  void **per_transfer_contextp,
867 				  unsigned int *nbytesp)
868 {
869 	struct ath10k *ar = ce_state->ar;
870 	struct ath10k_ce *ce = ath10k_ce_priv(ar);
871 	int ret;
872 
873 	spin_lock_bh(&ce->ce_lock);
874 	ret = ce_state->ops->ce_completed_recv_next_nolock(ce_state,
875 						   per_transfer_contextp,
876 						   nbytesp);
877 
878 	spin_unlock_bh(&ce->ce_lock);
879 
880 	return ret;
881 }
882 EXPORT_SYMBOL(ath10k_ce_completed_recv_next);
883 
884 static int _ath10k_ce_revoke_recv_next(struct ath10k_ce_pipe *ce_state,
885 				       void **per_transfer_contextp,
886 				       dma_addr_t *bufferp)
887 {
888 	struct ath10k_ce_ring *dest_ring;
889 	unsigned int nentries_mask;
890 	unsigned int sw_index;
891 	unsigned int write_index;
892 	int ret;
893 	struct ath10k *ar;
894 	struct ath10k_ce *ce;
895 
896 	dest_ring = ce_state->dest_ring;
897 
898 	if (!dest_ring)
899 		return -EIO;
900 
901 	ar = ce_state->ar;
902 	ce = ath10k_ce_priv(ar);
903 
904 	spin_lock_bh(&ce->ce_lock);
905 
906 	nentries_mask = dest_ring->nentries_mask;
907 	sw_index = dest_ring->sw_index;
908 	write_index = dest_ring->write_index;
909 	if (write_index != sw_index) {
910 		struct ce_desc *base = dest_ring->base_addr_owner_space;
911 		struct ce_desc *desc = CE_DEST_RING_TO_DESC(base, sw_index);
912 
913 		/* Return data from completed destination descriptor */
914 		*bufferp = __le32_to_cpu(desc->addr);
915 
916 		if (per_transfer_contextp)
917 			*per_transfer_contextp =
918 				dest_ring->per_transfer_context[sw_index];
919 
920 		/* sanity */
921 		dest_ring->per_transfer_context[sw_index] = NULL;
922 		desc->nbytes = 0;
923 
924 		/* Update sw_index */
925 		sw_index = CE_RING_IDX_INCR(nentries_mask, sw_index);
926 		dest_ring->sw_index = sw_index;
927 		ret = 0;
928 	} else {
929 		ret = -EIO;
930 	}
931 
932 	spin_unlock_bh(&ce->ce_lock);
933 
934 	return ret;
935 }
936 
937 static int _ath10k_ce_revoke_recv_next_64(struct ath10k_ce_pipe *ce_state,
938 					  void **per_transfer_contextp,
939 					  dma_addr_t *bufferp)
940 {
941 	struct ath10k_ce_ring *dest_ring;
942 	unsigned int nentries_mask;
943 	unsigned int sw_index;
944 	unsigned int write_index;
945 	int ret;
946 	struct ath10k *ar;
947 	struct ath10k_ce *ce;
948 
949 	dest_ring = ce_state->dest_ring;
950 
951 	if (!dest_ring)
952 		return -EIO;
953 
954 	ar = ce_state->ar;
955 	ce = ath10k_ce_priv(ar);
956 
957 	spin_lock_bh(&ce->ce_lock);
958 
959 	nentries_mask = dest_ring->nentries_mask;
960 	sw_index = dest_ring->sw_index;
961 	write_index = dest_ring->write_index;
962 	if (write_index != sw_index) {
963 		struct ce_desc_64 *base = dest_ring->base_addr_owner_space;
964 		struct ce_desc_64 *desc =
965 			CE_DEST_RING_TO_DESC_64(base, sw_index);
966 
967 		/* Return data from completed destination descriptor */
968 		*bufferp = __le64_to_cpu(desc->addr);
969 
970 		if (per_transfer_contextp)
971 			*per_transfer_contextp =
972 				dest_ring->per_transfer_context[sw_index];
973 
974 		/* sanity */
975 		dest_ring->per_transfer_context[sw_index] = NULL;
976 		desc->nbytes = 0;
977 
978 		/* Update sw_index */
979 		sw_index = CE_RING_IDX_INCR(nentries_mask, sw_index);
980 		dest_ring->sw_index = sw_index;
981 		ret = 0;
982 	} else {
983 		ret = -EIO;
984 	}
985 
986 	spin_unlock_bh(&ce->ce_lock);
987 
988 	return ret;
989 }
990 
991 int ath10k_ce_revoke_recv_next(struct ath10k_ce_pipe *ce_state,
992 			       void **per_transfer_contextp,
993 			       dma_addr_t *bufferp)
994 {
995 	return ce_state->ops->ce_revoke_recv_next(ce_state,
996 						  per_transfer_contextp,
997 						  bufferp);
998 }
999 EXPORT_SYMBOL(ath10k_ce_revoke_recv_next);
1000 
1001 /*
1002  * Guts of ath10k_ce_completed_send_next.
1003  * The caller takes responsibility for any necessary locking.
1004  */
1005 static int _ath10k_ce_completed_send_next_nolock(struct ath10k_ce_pipe *ce_state,
1006 						 void **per_transfer_contextp)
1007 {
1008 	struct ath10k_ce_ring *src_ring = ce_state->src_ring;
1009 	u32 ctrl_addr = ce_state->ctrl_addr;
1010 	struct ath10k *ar = ce_state->ar;
1011 	unsigned int nentries_mask = src_ring->nentries_mask;
1012 	unsigned int sw_index = src_ring->sw_index;
1013 	unsigned int read_index;
1014 	struct ce_desc *desc;
1015 
1016 	if (src_ring->hw_index == sw_index) {
1017 		/*
1018 		 * The SW completion index has caught up with the cached
1019 		 * version of the HW completion index.
1020 		 * Update the cached HW completion index to see whether
1021 		 * the SW has really caught up to the HW, or if the cached
1022 		 * value of the HW index has become stale.
1023 		 */
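		/* For example (illustrative numbers): with the cached
		 * hw_index == sw_index == 5, the CE may since have
		 * completed two more sends; re-reading the SRRI below
		 * then returns 7 and those completions can be reaped.
		 */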
1024 
1025 		read_index = ath10k_ce_src_ring_read_index_get(ar, ctrl_addr);
1026 		if (read_index == 0xffffffff)
1027 			return -ENODEV;
1028 
1029 		read_index &= nentries_mask;
1030 		src_ring->hw_index = read_index;
1031 	}
1032 
1033 	if (ar->hw_params.rri_on_ddr)
1034 		read_index = ath10k_ce_src_ring_read_index_get(ar, ctrl_addr);
1035 	else
1036 		read_index = src_ring->hw_index;
1037 
1038 	if (read_index == sw_index)
1039 		return -EIO;
1040 
1041 	if (per_transfer_contextp)
1042 		*per_transfer_contextp =
1043 			src_ring->per_transfer_context[sw_index];
1044 
1045 	/* sanity */
1046 	src_ring->per_transfer_context[sw_index] = NULL;
1047 	desc = CE_SRC_RING_TO_DESC(src_ring->base_addr_owner_space,
1048 				   sw_index);
1049 	desc->nbytes = 0;
1050 
1051 	/* Update sw_index */
1052 	sw_index = CE_RING_IDX_INCR(nentries_mask, sw_index);
1053 	src_ring->sw_index = sw_index;
1054 
1055 	return 0;
1056 }
1057 
1058 static int _ath10k_ce_completed_send_next_nolock_64(struct ath10k_ce_pipe *ce_state,
1059 						    void **per_transfer_contextp)
1060 {
1061 	struct ath10k_ce_ring *src_ring = ce_state->src_ring;
1062 	u32 ctrl_addr = ce_state->ctrl_addr;
1063 	struct ath10k *ar = ce_state->ar;
1064 	unsigned int nentries_mask = src_ring->nentries_mask;
1065 	unsigned int sw_index = src_ring->sw_index;
1066 	unsigned int read_index;
1067 	struct ce_desc_64 *desc;
1068 
1069 	if (src_ring->hw_index == sw_index) {
1070 		/*
1071 		 * The SW completion index has caught up with the cached
1072 		 * version of the HW completion index.
1073 		 * Update the cached HW completion index to see whether
1074 		 * the SW has really caught up to the HW, or if the cached
1075 		 * value of the HW index has become stale.
1076 		 */
1077 
1078 		read_index = ath10k_ce_src_ring_read_index_get(ar, ctrl_addr);
1079 		if (read_index == 0xffffffff)
1080 			return -ENODEV;
1081 
1082 		read_index &= nentries_mask;
1083 		src_ring->hw_index = read_index;
1084 	}
1085 
1086 	if (ar->hw_params.rri_on_ddr)
1087 		read_index = ath10k_ce_src_ring_read_index_get(ar, ctrl_addr);
1088 	else
1089 		read_index = src_ring->hw_index;
1090 
1091 	if (read_index == sw_index)
1092 		return -EIO;
1093 
1094 	if (per_transfer_contextp)
1095 		*per_transfer_contextp =
1096 			src_ring->per_transfer_context[sw_index];
1097 
1098 	/* sanity */
1099 	src_ring->per_transfer_context[sw_index] = NULL;
1100 	desc = CE_SRC_RING_TO_DESC_64(src_ring->base_addr_owner_space,
1101 				      sw_index);
1102 	desc->nbytes = 0;
1103 
1104 	/* Update sw_index */
1105 	sw_index = CE_RING_IDX_INCR(nentries_mask, sw_index);
1106 	src_ring->sw_index = sw_index;
1107 
1108 	return 0;
1109 }
1110 
1111 int ath10k_ce_completed_send_next_nolock(struct ath10k_ce_pipe *ce_state,
1112 					 void **per_transfer_contextp)
1113 {
1114 	return ce_state->ops->ce_completed_send_next_nolock(ce_state,
1115 							    per_transfer_contextp);
1116 }
1117 EXPORT_SYMBOL(ath10k_ce_completed_send_next_nolock);
1118 
1119 static void ath10k_ce_extract_desc_data(struct ath10k *ar,
1120 					struct ath10k_ce_ring *src_ring,
1121 					u32 sw_index,
1122 					dma_addr_t *bufferp,
1123 					u32 *nbytesp,
1124 					u32 *transfer_idp)
1125 {
1126 		struct ce_desc *base = src_ring->base_addr_owner_space;
1127 		struct ce_desc *desc = CE_SRC_RING_TO_DESC(base, sw_index);
1128 
1129 		/* Return data from completed source descriptor */
1130 		*bufferp = __le32_to_cpu(desc->addr);
1131 		*nbytesp = __le16_to_cpu(desc->nbytes);
1132 		*transfer_idp = MS(__le16_to_cpu(desc->flags),
1133 				   CE_DESC_FLAGS_META_DATA);
1134 }
1135 
1136 static void ath10k_ce_extract_desc_data_64(struct ath10k *ar,
1137 					   struct ath10k_ce_ring *src_ring,
1138 					   u32 sw_index,
1139 					   dma_addr_t *bufferp,
1140 					   u32 *nbytesp,
1141 					   u32 *transfer_idp)
1142 {
1143 		struct ce_desc_64 *base = src_ring->base_addr_owner_space;
1144 		struct ce_desc_64 *desc =
1145 			CE_SRC_RING_TO_DESC_64(base, sw_index);
1146 
1147 		/* Return data from completed source descriptor */
1148 		*bufferp = __le64_to_cpu(desc->addr);
1149 		*nbytesp = __le16_to_cpu(desc->nbytes);
1150 		*transfer_idp = MS(__le16_to_cpu(desc->flags),
1151 				   CE_DESC_FLAGS_META_DATA);
1152 }
1153 
1154 /* NB: Modeled after ath10k_ce_completed_send_next */
1155 int ath10k_ce_cancel_send_next(struct ath10k_ce_pipe *ce_state,
1156 			       void **per_transfer_contextp,
1157 			       dma_addr_t *bufferp,
1158 			       unsigned int *nbytesp,
1159 			       unsigned int *transfer_idp)
1160 {
1161 	struct ath10k_ce_ring *src_ring;
1162 	unsigned int nentries_mask;
1163 	unsigned int sw_index;
1164 	unsigned int write_index;
1165 	int ret;
1166 	struct ath10k *ar;
1167 	struct ath10k_ce *ce;
1168 
1169 	src_ring = ce_state->src_ring;
1170 
1171 	if (!src_ring)
1172 		return -EIO;
1173 
1174 	ar = ce_state->ar;
1175 	ce = ath10k_ce_priv(ar);
1176 
1177 	spin_lock_bh(&ce->ce_lock);
1178 
1179 	nentries_mask = src_ring->nentries_mask;
1180 	sw_index = src_ring->sw_index;
1181 	write_index = src_ring->write_index;
1182 
1183 	if (write_index != sw_index) {
1184 		ce_state->ops->ce_extract_desc_data(ar, src_ring, sw_index,
1185 						    bufferp, nbytesp,
1186 						    transfer_idp);
1187 
1188 		if (per_transfer_contextp)
1189 			*per_transfer_contextp =
1190 				src_ring->per_transfer_context[sw_index];
1191 
1192 		/* sanity */
1193 		src_ring->per_transfer_context[sw_index] = NULL;
1194 
1195 		/* Update sw_index */
1196 		sw_index = CE_RING_IDX_INCR(nentries_mask, sw_index);
1197 		src_ring->sw_index = sw_index;
1198 		ret = 0;
1199 	} else {
1200 		ret = -EIO;
1201 	}
1202 
1203 	spin_unlock_bh(&ce->ce_lock);
1204 
1205 	return ret;
1206 }
1207 EXPORT_SYMBOL(ath10k_ce_cancel_send_next);
1208 
1209 int ath10k_ce_completed_send_next(struct ath10k_ce_pipe *ce_state,
1210 				  void **per_transfer_contextp)
1211 {
1212 	struct ath10k *ar = ce_state->ar;
1213 	struct ath10k_ce *ce = ath10k_ce_priv(ar);
1214 	int ret;
1215 
1216 	spin_lock_bh(&ce->ce_lock);
1217 	ret = ath10k_ce_completed_send_next_nolock(ce_state,
1218 						   per_transfer_contextp);
1219 	spin_unlock_bh(&ce->ce_lock);
1220 
1221 	return ret;
1222 }
1223 EXPORT_SYMBOL(ath10k_ce_completed_send_next);
1224 
1225 /*
1226  * Guts of interrupt handler for per-engine interrupts on a particular CE.
1227  *
1228  * Invokes the registered recv_complete and send_complete
1229  * callbacks and clears the watermark interrupt status.
1230  */
1231 void ath10k_ce_per_engine_service(struct ath10k *ar, unsigned int ce_id)
1232 {
1233 	struct ath10k_ce *ce = ath10k_ce_priv(ar);
1234 	struct ath10k_ce_pipe *ce_state = &ce->ce_states[ce_id];
1235 	const struct ath10k_hw_ce_host_wm_regs *wm_regs = ar->hw_ce_regs->wm_regs;
1236 	u32 ctrl_addr = ce_state->ctrl_addr;
1237 
1238 	/*
1239 	 * Clear before handling
1240 	 *
1241 	 * Misc CE interrupts are not being handled, but still need
1242 	 * to be cleared.
1243 	 *
1244 	 * NOTE: When the last copy engine interrupt is cleared the
1245 	 * hardware will go to sleep.  Once this happens any access to
1246 	 * the CE registers can cause a hardware fault.
1247 	 */
1248 	ath10k_ce_engine_int_status_clear(ar, ctrl_addr,
1249 					  wm_regs->cc_mask | wm_regs->wm_mask);
1250 
1251 	if (ce_state->recv_cb)
1252 		ce_state->recv_cb(ce_state);
1253 
1254 	if (ce_state->send_cb)
1255 		ce_state->send_cb(ce_state);
1256 }
1257 EXPORT_SYMBOL(ath10k_ce_per_engine_service);
1258 
1259 /*
1260  * Handler for per-engine interrupts on ALL active CEs.
1261  * This is used in cases where the system is sharing a
1262  * single interrupt for all CEs
1263  */
1264 
1265 void ath10k_ce_per_engine_service_any(struct ath10k *ar)
1266 {
1267 	int ce_id;
1268 	u32 intr_summary;
1269 
1270 	intr_summary = ath10k_ce_interrupt_summary(ar);
1271 
1272 	for (ce_id = 0; intr_summary && (ce_id < CE_COUNT); ce_id++) {
1273 		if (intr_summary & (1 << ce_id))
1274 			intr_summary &= ~(1 << ce_id);
1275 		else
1276 			/* no intr pending on this CE */
1277 			continue;
1278 
1279 		ath10k_ce_per_engine_service(ar, ce_id);
1280 	}
1281 }
1282 EXPORT_SYMBOL(ath10k_ce_per_engine_service_any);
1283 
1284 /*
1285  * Adjust interrupts for the copy complete handler.
1286  * If it's needed for either send or recv, then unmask
1287  * this interrupt; otherwise, mask it.
1288  *
1289  * Called with ce_lock held.
1290  */
1291 static void ath10k_ce_per_engine_handler_adjust(struct ath10k_ce_pipe *ce_state)
1292 {
1293 	u32 ctrl_addr = ce_state->ctrl_addr;
1294 	struct ath10k *ar = ce_state->ar;
1295 	bool disable_copy_compl_intr = ce_state->attr_flags & CE_ATTR_DIS_INTR;
1296 
1297 	if ((!disable_copy_compl_intr) &&
1298 	    (ce_state->send_cb || ce_state->recv_cb))
1299 		ath10k_ce_copy_complete_inter_enable(ar, ctrl_addr);
1300 	else
1301 		ath10k_ce_copy_complete_intr_disable(ar, ctrl_addr);
1302 
1303 	ath10k_ce_watermark_intr_disable(ar, ctrl_addr);
1304 }
1305 
1306 void ath10k_ce_disable_interrupt(struct ath10k *ar, int ce_id)
1307 {
1308 	struct ath10k_ce *ce = ath10k_ce_priv(ar);
1309 	struct ath10k_ce_pipe *ce_state;
1310 	u32 ctrl_addr;
1311 
1312 	ce_state  = &ce->ce_states[ce_id];
1313 	if (ce_state->attr_flags & CE_ATTR_POLL)
1314 		return;
1315 
1316 	ctrl_addr = ath10k_ce_base_address(ar, ce_id);
1317 
1318 	ath10k_ce_copy_complete_intr_disable(ar, ctrl_addr);
1319 	ath10k_ce_error_intr_disable(ar, ctrl_addr);
1320 	ath10k_ce_watermark_intr_disable(ar, ctrl_addr);
1321 }
1322 EXPORT_SYMBOL(ath10k_ce_disable_interrupt);
1323 
1324 void ath10k_ce_disable_interrupts(struct ath10k *ar)
1325 {
1326 	int ce_id;
1327 
1328 	for (ce_id = 0; ce_id < CE_COUNT; ce_id++)
1329 		ath10k_ce_disable_interrupt(ar, ce_id);
1330 }
1331 EXPORT_SYMBOL(ath10k_ce_disable_interrupts);
1332 
1333 void ath10k_ce_enable_interrupt(struct ath10k *ar, int ce_id)
1334 {
1335 	struct ath10k_ce *ce = ath10k_ce_priv(ar);
1336 	struct ath10k_ce_pipe *ce_state;
1337 
1338 	ce_state  = &ce->ce_states[ce_id];
1339 	if (ce_state->attr_flags & CE_ATTR_POLL)
1340 		return;
1341 
1342 	ath10k_ce_per_engine_handler_adjust(ce_state);
1343 }
1344 EXPORT_SYMBOL(ath10k_ce_enable_interrupt);
1345 
1346 void ath10k_ce_enable_interrupts(struct ath10k *ar)
1347 {
1348 	int ce_id;
1349 
1350 	/* Enable interrupts for copy engines that
1351 	 * are not using polling mode.
1352 	 */
1353 	for (ce_id = 0; ce_id < CE_COUNT; ce_id++)
1354 		ath10k_ce_enable_interrupt(ar, ce_id);
1355 }
1356 EXPORT_SYMBOL(ath10k_ce_enable_interrupts);
1357 
1358 static int ath10k_ce_init_src_ring(struct ath10k *ar,
1359 				   unsigned int ce_id,
1360 				   const struct ce_attr *attr)
1361 {
1362 	struct ath10k_ce *ce = ath10k_ce_priv(ar);
1363 	struct ath10k_ce_pipe *ce_state = &ce->ce_states[ce_id];
1364 	struct ath10k_ce_ring *src_ring = ce_state->src_ring;
1365 	u32 nentries, ctrl_addr = ath10k_ce_base_address(ar, ce_id);
1366 
1367 	nentries = roundup_pow_of_two(attr->src_nentries);
1368 
1369 	if (ar->hw_params.target_64bit)
1370 		memset(src_ring->base_addr_owner_space, 0,
1371 		       nentries * sizeof(struct ce_desc_64));
1372 	else
1373 		memset(src_ring->base_addr_owner_space, 0,
1374 		       nentries * sizeof(struct ce_desc));
1375 
1376 	src_ring->sw_index = ath10k_ce_src_ring_read_index_get(ar, ctrl_addr);
1377 	src_ring->sw_index &= src_ring->nentries_mask;
1378 	src_ring->hw_index = src_ring->sw_index;
1379 
1380 	src_ring->write_index =
1381 		ath10k_ce_src_ring_write_index_get(ar, ctrl_addr);
1382 	src_ring->write_index &= src_ring->nentries_mask;
1383 
1384 	ath10k_ce_src_ring_base_addr_set(ar, ce_id,
1385 					 src_ring->base_addr_ce_space);
1386 	ath10k_ce_src_ring_size_set(ar, ctrl_addr, nentries);
1387 	ath10k_ce_src_ring_dmax_set(ar, ctrl_addr, attr->src_sz_max);
1388 	ath10k_ce_src_ring_byte_swap_set(ar, ctrl_addr, 0);
1389 	ath10k_ce_src_ring_lowmark_set(ar, ctrl_addr, 0);
1390 	ath10k_ce_src_ring_highmark_set(ar, ctrl_addr, nentries);
1391 
1392 	ath10k_dbg(ar, ATH10K_DBG_BOOT,
1393 		   "boot init ce src ring id %d entries %d base_addr %p\n",
1394 		   ce_id, nentries, src_ring->base_addr_owner_space);
1395 
1396 	return 0;
1397 }
1398 
1399 static int ath10k_ce_init_dest_ring(struct ath10k *ar,
1400 				    unsigned int ce_id,
1401 				    const struct ce_attr *attr)
1402 {
1403 	struct ath10k_ce *ce = ath10k_ce_priv(ar);
1404 	struct ath10k_ce_pipe *ce_state = &ce->ce_states[ce_id];
1405 	struct ath10k_ce_ring *dest_ring = ce_state->dest_ring;
1406 	u32 nentries, ctrl_addr = ath10k_ce_base_address(ar, ce_id);
1407 
1408 	nentries = roundup_pow_of_two(attr->dest_nentries);
1409 
1410 	if (ar->hw_params.target_64bit)
1411 		memset(dest_ring->base_addr_owner_space, 0,
1412 		       nentries * sizeof(struct ce_desc_64));
1413 	else
1414 		memset(dest_ring->base_addr_owner_space, 0,
1415 		       nentries * sizeof(struct ce_desc));
1416 
1417 	dest_ring->sw_index = ath10k_ce_dest_ring_read_index_get(ar, ctrl_addr);
1418 	dest_ring->sw_index &= dest_ring->nentries_mask;
1419 	dest_ring->write_index =
1420 		ath10k_ce_dest_ring_write_index_get(ar, ctrl_addr);
1421 	dest_ring->write_index &= dest_ring->nentries_mask;
1422 
1423 	ath10k_ce_dest_ring_base_addr_set(ar, ce_id,
1424 					  dest_ring->base_addr_ce_space);
1425 	ath10k_ce_dest_ring_size_set(ar, ctrl_addr, nentries);
1426 	ath10k_ce_dest_ring_byte_swap_set(ar, ctrl_addr, 0);
1427 	ath10k_ce_dest_ring_lowmark_set(ar, ctrl_addr, 0);
1428 	ath10k_ce_dest_ring_highmark_set(ar, ctrl_addr, nentries);
1429 
1430 	ath10k_dbg(ar, ATH10K_DBG_BOOT,
1431 		   "boot ce dest ring id %d entries %d base_addr %p\n",
1432 		   ce_id, nentries, dest_ring->base_addr_owner_space);
1433 
1434 	return 0;
1435 }
1436 
1437 static int ath10k_ce_alloc_shadow_base(struct ath10k *ar,
1438 				       struct ath10k_ce_ring *src_ring,
1439 				       u32 nentries)
1440 {
1441 	src_ring->shadow_base_unaligned = kcalloc(nentries,
1442 						  sizeof(struct ce_desc_64),
1443 						  GFP_KERNEL);
1444 	if (!src_ring->shadow_base_unaligned)
1445 		return -ENOMEM;
1446 
1447 	src_ring->shadow_base = (struct ce_desc_64 *)
1448 			PTR_ALIGN(src_ring->shadow_base_unaligned,
1449 				  CE_DESC_RING_ALIGN);
1450 	return 0;
1451 }
1452 
1453 static struct ath10k_ce_ring *
1454 ath10k_ce_alloc_src_ring(struct ath10k *ar, unsigned int ce_id,
1455 			 const struct ce_attr *attr)
1456 {
1457 	struct ath10k_ce_ring *src_ring;
1458 	u32 nentries = attr->src_nentries;
1459 	dma_addr_t base_addr;
1460 	int ret;
1461 
1462 	nentries = roundup_pow_of_two(nentries);
1463 
1464 	src_ring = kzalloc(struct_size(src_ring, per_transfer_context,
1465 				       nentries), GFP_KERNEL);
1466 	if (src_ring == NULL)
1467 		return ERR_PTR(-ENOMEM);
1468 
1469 	src_ring->nentries = nentries;
1470 	src_ring->nentries_mask = nentries - 1;
1471 
1472 	/*
1473 	 * Legacy platforms that do not support cache
1474 	 * coherent DMA are unsupported
1475 	 */
1476 	src_ring->base_addr_owner_space_unaligned =
1477 		dma_alloc_coherent(ar->dev,
1478 				   (nentries * sizeof(struct ce_desc) +
1479 				    CE_DESC_RING_ALIGN),
1480 				   &base_addr, GFP_KERNEL);
1481 	if (!src_ring->base_addr_owner_space_unaligned) {
1482 		kfree(src_ring);
1483 		return ERR_PTR(-ENOMEM);
1484 	}
1485 
1486 	src_ring->base_addr_ce_space_unaligned = base_addr;
1487 
1488 	src_ring->base_addr_owner_space =
1489 			PTR_ALIGN(src_ring->base_addr_owner_space_unaligned,
1490 				  CE_DESC_RING_ALIGN);
1491 	src_ring->base_addr_ce_space =
1492 			ALIGN(src_ring->base_addr_ce_space_unaligned,
1493 			      CE_DESC_RING_ALIGN);
1494 
1495 	if (ar->hw_params.shadow_reg_support) {
1496 		ret = ath10k_ce_alloc_shadow_base(ar, src_ring, nentries);
1497 		if (ret) {
1498 			dma_free_coherent(ar->dev,
1499 					  (nentries * sizeof(struct ce_desc) +
1500 					   CE_DESC_RING_ALIGN),
1501 					  src_ring->base_addr_owner_space_unaligned,
1502 					  base_addr);
1503 			kfree(src_ring);
1504 			return ERR_PTR(ret);
1505 		}
1506 	}
1507 
1508 	return src_ring;
1509 }
1510 
1511 static struct ath10k_ce_ring *
1512 ath10k_ce_alloc_src_ring_64(struct ath10k *ar, unsigned int ce_id,
1513 			    const struct ce_attr *attr)
1514 {
1515 	struct ath10k_ce_ring *src_ring;
1516 	u32 nentries = attr->src_nentries;
1517 	dma_addr_t base_addr;
1518 	int ret;
1519 
1520 	nentries = roundup_pow_of_two(nentries);
1521 
1522 	src_ring = kzalloc(struct_size(src_ring, per_transfer_context,
1523 				       nentries), GFP_KERNEL);
1524 	if (!src_ring)
1525 		return ERR_PTR(-ENOMEM);
1526 
1527 	src_ring->nentries = nentries;
1528 	src_ring->nentries_mask = nentries - 1;
1529 
1530 	/* Legacy platforms that do not support cache
1531 	 * coherent DMA are unsupported
1532 	 */
1533 	src_ring->base_addr_owner_space_unaligned =
1534 		dma_alloc_coherent(ar->dev,
1535 				   (nentries * sizeof(struct ce_desc_64) +
1536 				    CE_DESC_RING_ALIGN),
1537 				   &base_addr, GFP_KERNEL);
1538 	if (!src_ring->base_addr_owner_space_unaligned) {
1539 		kfree(src_ring);
1540 		return ERR_PTR(-ENOMEM);
1541 	}
1542 
1543 	src_ring->base_addr_ce_space_unaligned = base_addr;
1544 
1545 	src_ring->base_addr_owner_space =
1546 			PTR_ALIGN(src_ring->base_addr_owner_space_unaligned,
1547 				  CE_DESC_RING_ALIGN);
1548 	src_ring->base_addr_ce_space =
1549 			ALIGN(src_ring->base_addr_ce_space_unaligned,
1550 			      CE_DESC_RING_ALIGN);
1551 
1552 	if (ar->hw_params.shadow_reg_support) {
1553 		ret = ath10k_ce_alloc_shadow_base(ar, src_ring, nentries);
1554 		if (ret) {
1555 			dma_free_coherent(ar->dev,
1556 					  (nentries * sizeof(struct ce_desc_64) +
1557 					   CE_DESC_RING_ALIGN),
1558 					  src_ring->base_addr_owner_space_unaligned,
1559 					  base_addr);
1560 			kfree(src_ring);
1561 			return ERR_PTR(ret);
1562 		}
1563 	}
1564 
1565 	return src_ring;
1566 }
1567 
1568 static struct ath10k_ce_ring *
1569 ath10k_ce_alloc_dest_ring(struct ath10k *ar, unsigned int ce_id,
1570 			  const struct ce_attr *attr)
1571 {
1572 	struct ath10k_ce_ring *dest_ring;
1573 	u32 nentries;
1574 	dma_addr_t base_addr;
1575 
1576 	nentries = roundup_pow_of_two(attr->dest_nentries);
1577 
1578 	dest_ring = kzalloc(struct_size(dest_ring, per_transfer_context,
1579 					nentries), GFP_KERNEL);
1580 	if (dest_ring == NULL)
1581 		return ERR_PTR(-ENOMEM);
1582 
1583 	dest_ring->nentries = nentries;
1584 	dest_ring->nentries_mask = nentries - 1;
1585 
1586 	/*
1587 	 * Legacy platforms that do not support cache-coherent
1588 	 * DMA are not supported.
1589 	 */
1590 	dest_ring->base_addr_owner_space_unaligned =
1591 		dma_alloc_coherent(ar->dev,
1592 				   (nentries * sizeof(struct ce_desc) +
1593 				    CE_DESC_RING_ALIGN),
1594 				   &base_addr, GFP_KERNEL);
1595 	if (!dest_ring->base_addr_owner_space_unaligned) {
1596 		kfree(dest_ring);
1597 		return ERR_PTR(-ENOMEM);
1598 	}
1599 
1600 	dest_ring->base_addr_ce_space_unaligned = base_addr;
1601 
1602 	dest_ring->base_addr_owner_space =
1603 			PTR_ALIGN(dest_ring->base_addr_owner_space_unaligned,
1604 				  CE_DESC_RING_ALIGN);
1605 	dest_ring->base_addr_ce_space =
1606 				ALIGN(dest_ring->base_addr_ce_space_unaligned,
1607 				      CE_DESC_RING_ALIGN);
1608 
1609 	return dest_ring;
1610 }
1611 
1612 static struct ath10k_ce_ring *
1613 ath10k_ce_alloc_dest_ring_64(struct ath10k *ar, unsigned int ce_id,
1614 			     const struct ce_attr *attr)
1615 {
1616 	struct ath10k_ce_ring *dest_ring;
1617 	u32 nentries;
1618 	dma_addr_t base_addr;
1619 
1620 	nentries = roundup_pow_of_two(attr->dest_nentries);
1621 
1622 	dest_ring = kzalloc(struct_size(dest_ring, per_transfer_context,
1623 					nentries), GFP_KERNEL);
1624 	if (!dest_ring)
1625 		return ERR_PTR(-ENOMEM);
1626 
1627 	dest_ring->nentries = nentries;
1628 	dest_ring->nentries_mask = nentries - 1;
1629 
1630 	/* Legacy platforms that do not support cache-coherent
1631 	 * DMA are not supported.
1632 	 */
1633 	dest_ring->base_addr_owner_space_unaligned =
1634 		dma_alloc_coherent(ar->dev,
1635 				   (nentries * sizeof(struct ce_desc_64) +
1636 				    CE_DESC_RING_ALIGN),
1637 				   &base_addr, GFP_KERNEL);
1638 	if (!dest_ring->base_addr_owner_space_unaligned) {
1639 		kfree(dest_ring);
1640 		return ERR_PTR(-ENOMEM);
1641 	}
1642 
1643 	dest_ring->base_addr_ce_space_unaligned = base_addr;
1644 
1645 	/* Correctly initialize memory to 0 to prevent garbage
1646 	 * data from crashing the system during firmware download.
1647 	 */
1648 	dest_ring->base_addr_owner_space =
1649 			PTR_ALIGN(dest_ring->base_addr_owner_space_unaligned,
1650 				  CE_DESC_RING_ALIGN);
1651 	dest_ring->base_addr_ce_space =
1652 			ALIGN(dest_ring->base_addr_ce_space_unaligned,
1653 			      CE_DESC_RING_ALIGN);
1654 
1655 	return dest_ring;
1656 }
1657 
1658 /*
1659  * Initialize a Copy Engine based on caller-supplied attributes.
1660  * This may be called once to initialize both source and destination
1661  * rings or it may be called twice for separate source and destination
1662  * initialization. It may be that only one side or the other is
1663  * initialized by software/firmware.
1664  */
1665 int ath10k_ce_init_pipe(struct ath10k *ar, unsigned int ce_id,
1666 			const struct ce_attr *attr)
1667 {
1668 	int ret;
1669 
1670 	if (attr->src_nentries) {
1671 		ret = ath10k_ce_init_src_ring(ar, ce_id, attr);
1672 		if (ret) {
1673 			ath10k_err(ar, "Failed to initialize CE src ring for ID: %d (%d)\n",
1674 				   ce_id, ret);
1675 			return ret;
1676 		}
1677 	}
1678 
1679 	if (attr->dest_nentries) {
1680 		ret = ath10k_ce_init_dest_ring(ar, ce_id, attr);
1681 		if (ret) {
1682 			ath10k_err(ar, "Failed to initialize CE dest ring for ID: %d (%d)\n",
1683 				   ce_id, ret);
1684 			return ret;
1685 		}
1686 	}
1687 
1688 	return 0;
1689 }
1690 EXPORT_SYMBOL(ath10k_ce_init_pipe);
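/*
 * Usage sketch (illustrative only, not taken from a specific bus layer):
 * a HIF backend typically allocates every pipe once at probe time and then
 * (re)initializes the hardware rings each time the target is powered up:
 *
 *	for (ce_id = 0; ce_id < CE_COUNT; ce_id++) {
 *		ret = ath10k_ce_alloc_pipe(ar, ce_id, &ce_config[ce_id]);
 *		if (ret)
 *			goto err;
 *	}
 *	...
 *	for (ce_id = 0; ce_id < CE_COUNT; ce_id++) {
 *		ret = ath10k_ce_init_pipe(ar, ce_id, &ce_config[ce_id]);
 *		if (ret)
 *			goto err;
 *	}
 *
 * ce_config[] stands in for whatever struct ce_attr table the bus layer
 * supplies; the name is hypothetical.
 */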
1691 
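/*
 * Clear the hardware's view of the source ring (base address, size, dmax
 * and high watermark) so the copy engine stops referencing the host ring
 * memory. The destination-ring helper below does the same for its ring.
 */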
1692 static void ath10k_ce_deinit_src_ring(struct ath10k *ar, unsigned int ce_id)
1693 {
1694 	u32 ctrl_addr = ath10k_ce_base_address(ar, ce_id);
1695 
1696 	ath10k_ce_src_ring_base_addr_set(ar, ce_id, 0);
1697 	ath10k_ce_src_ring_size_set(ar, ctrl_addr, 0);
1698 	ath10k_ce_src_ring_dmax_set(ar, ctrl_addr, 0);
1699 	ath10k_ce_src_ring_highmark_set(ar, ctrl_addr, 0);
1700 }
1701 
1702 static void ath10k_ce_deinit_dest_ring(struct ath10k *ar, unsigned int ce_id)
1703 {
1704 	u32 ctrl_addr = ath10k_ce_base_address(ar, ce_id);
1705 
1706 	ath10k_ce_dest_ring_base_addr_set(ar, ce_id, 0);
1707 	ath10k_ce_dest_ring_size_set(ar, ctrl_addr, 0);
1708 	ath10k_ce_dest_ring_highmark_set(ar, ctrl_addr, 0);
1709 }
1710 
1711 void ath10k_ce_deinit_pipe(struct ath10k *ar, unsigned int ce_id)
1712 {
1713 	ath10k_ce_deinit_src_ring(ar, ce_id);
1714 	ath10k_ce_deinit_dest_ring(ar, ce_id);
1715 }
1716 EXPORT_SYMBOL(ath10k_ce_deinit_pipe);
1717 
1718 static void _ath10k_ce_free_pipe(struct ath10k *ar, int ce_id)
1719 {
1720 	struct ath10k_ce *ce = ath10k_ce_priv(ar);
1721 	struct ath10k_ce_pipe *ce_state = &ce->ce_states[ce_id];
1722 
1723 	if (ce_state->src_ring) {
1724 		if (ar->hw_params.shadow_reg_support)
1725 			kfree(ce_state->src_ring->shadow_base_unaligned);
1726 		dma_free_coherent(ar->dev,
1727 				  (ce_state->src_ring->nentries *
1728 				   sizeof(struct ce_desc) +
1729 				   CE_DESC_RING_ALIGN),
1730 				  ce_state->src_ring->base_addr_owner_space,
1731 				  ce_state->src_ring->base_addr_ce_space);
1732 		kfree(ce_state->src_ring);
1733 	}
1734 
1735 	if (ce_state->dest_ring) {
1736 		dma_free_coherent(ar->dev,
1737 				  (ce_state->dest_ring->nentries *
1738 				   sizeof(struct ce_desc) +
1739 				   CE_DESC_RING_ALIGN),
1740 				  ce_state->dest_ring->base_addr_owner_space,
1741 				  ce_state->dest_ring->base_addr_ce_space);
1742 		kfree(ce_state->dest_ring);
1743 	}
1744 
1745 	ce_state->src_ring = NULL;
1746 	ce_state->dest_ring = NULL;
1747 }
1748 
1749 static void _ath10k_ce_free_pipe_64(struct ath10k *ar, int ce_id)
1750 {
1751 	struct ath10k_ce *ce = ath10k_ce_priv(ar);
1752 	struct ath10k_ce_pipe *ce_state = &ce->ce_states[ce_id];
1753 
1754 	if (ce_state->src_ring) {
1755 		if (ar->hw_params.shadow_reg_support)
1756 			kfree(ce_state->src_ring->shadow_base_unaligned);
1757 		dma_free_coherent(ar->dev,
1758 				  (ce_state->src_ring->nentries *
1759 				   sizeof(struct ce_desc_64) +
1760 				   CE_DESC_RING_ALIGN),
1761 				  ce_state->src_ring->base_addr_owner_space,
1762 				  ce_state->src_ring->base_addr_ce_space);
1763 		kfree(ce_state->src_ring);
1764 	}
1765 
1766 	if (ce_state->dest_ring) {
1767 		dma_free_coherent(ar->dev,
1768 				  (ce_state->dest_ring->nentries *
1769 				   sizeof(struct ce_desc_64) +
1770 				   CE_DESC_RING_ALIGN),
1771 				  ce_state->dest_ring->base_addr_owner_space,
1772 				  ce_state->dest_ring->base_addr_ce_space);
1773 		kfree(ce_state->dest_ring);
1774 	}
1775 
1776 	ce_state->src_ring = NULL;
1777 	ce_state->dest_ring = NULL;
1778 }
1779 
1780 void ath10k_ce_free_pipe(struct ath10k *ar, int ce_id)
1781 {
1782 	struct ath10k_ce *ce = ath10k_ce_priv(ar);
1783 	struct ath10k_ce_pipe *ce_state = &ce->ce_states[ce_id];
1784 
1785 	ce_state->ops->ce_free_pipe(ar, ce_id);
1786 }
1787 EXPORT_SYMBOL(ath10k_ce_free_pipe);
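/*
 * Teardown sketch (illustrative only): a caller would normally quiesce a
 * pipe before releasing its memory, i.e. first point the hardware away
 * from the rings, then free the coherent descriptor memory:
 *
 *	ath10k_ce_deinit_pipe(ar, ce_id);
 *	ath10k_ce_free_pipe(ar, ce_id);
 */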
1788 
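/*
 * Dump the per-CE source/destination read and write indices to the log and,
 * when crash_data is provided, record them for the firmware crash dump.
 * The caller must hold ar->dump_mutex; the CE lock is taken internally.
 */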
1789 void ath10k_ce_dump_registers(struct ath10k *ar,
1790 			      struct ath10k_fw_crash_data *crash_data)
1791 {
1792 	struct ath10k_ce *ce = ath10k_ce_priv(ar);
1793 	struct ath10k_ce_crash_data ce_data;
1794 	u32 addr, id;
1795 
1796 	lockdep_assert_held(&ar->dump_mutex);
1797 
1798 	ath10k_err(ar, "Copy Engine register dump:\n");
1799 
1800 	spin_lock_bh(&ce->ce_lock);
1801 	for (id = 0; id < CE_COUNT; id++) {
1802 		addr = ath10k_ce_base_address(ar, id);
1803 		ce_data.base_addr = cpu_to_le32(addr);
1804 
1805 		ce_data.src_wr_idx =
1806 			cpu_to_le32(ath10k_ce_src_ring_write_index_get(ar, addr));
1807 		ce_data.src_r_idx =
1808 			cpu_to_le32(ath10k_ce_src_ring_read_index_get(ar, addr));
1809 		ce_data.dst_wr_idx =
1810 			cpu_to_le32(ath10k_ce_dest_ring_write_index_get(ar, addr));
1811 		ce_data.dst_r_idx =
1812 			cpu_to_le32(ath10k_ce_dest_ring_read_index_get(ar, addr));
1813 
1814 		if (crash_data)
1815 			crash_data->ce_crash_data[id] = ce_data;
1816 
1817 		ath10k_err(ar, "[%02d]: 0x%08x %3u %3u %3u %3u", id,
1818 			   le32_to_cpu(ce_data.base_addr),
1819 			   le32_to_cpu(ce_data.src_wr_idx),
1820 			   le32_to_cpu(ce_data.src_r_idx),
1821 			   le32_to_cpu(ce_data.dst_wr_idx),
1822 			   le32_to_cpu(ce_data.dst_r_idx));
1823 	}
1824 
1825 	spin_unlock_bh(&ce->ce_lock);
1826 }
1827 EXPORT_SYMBOL(ath10k_ce_dump_registers);
1828 
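/*
 * Per-target operation tables: ce_ops handles the 32-bit struct ce_desc
 * descriptor format, while ce_64_ops handles struct ce_desc_64 (including
 * the high halves of the ring base addresses). ath10k_ce_set_ops() picks
 * the table based on the hardware revision.
 */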
1829 static const struct ath10k_ce_ops ce_ops = {
1830 	.ce_alloc_src_ring = ath10k_ce_alloc_src_ring,
1831 	.ce_alloc_dst_ring = ath10k_ce_alloc_dest_ring,
1832 	.ce_rx_post_buf = __ath10k_ce_rx_post_buf,
1833 	.ce_completed_recv_next_nolock = _ath10k_ce_completed_recv_next_nolock,
1834 	.ce_revoke_recv_next = _ath10k_ce_revoke_recv_next,
1835 	.ce_extract_desc_data = ath10k_ce_extract_desc_data,
1836 	.ce_free_pipe = _ath10k_ce_free_pipe,
1837 	.ce_send_nolock = _ath10k_ce_send_nolock,
1838 	.ce_set_src_ring_base_addr_hi = NULL,
1839 	.ce_set_dest_ring_base_addr_hi = NULL,
1840 	.ce_completed_send_next_nolock = _ath10k_ce_completed_send_next_nolock,
1841 };
1842 
1843 static const struct ath10k_ce_ops ce_64_ops = {
1844 	.ce_alloc_src_ring = ath10k_ce_alloc_src_ring_64,
1845 	.ce_alloc_dst_ring = ath10k_ce_alloc_dest_ring_64,
1846 	.ce_rx_post_buf = __ath10k_ce_rx_post_buf_64,
1847 	.ce_completed_recv_next_nolock =
1848 				_ath10k_ce_completed_recv_next_nolock_64,
1849 	.ce_revoke_recv_next = _ath10k_ce_revoke_recv_next_64,
1850 	.ce_extract_desc_data = ath10k_ce_extract_desc_data_64,
1851 	.ce_free_pipe = _ath10k_ce_free_pipe_64,
1852 	.ce_send_nolock = _ath10k_ce_send_nolock_64,
1853 	.ce_set_src_ring_base_addr_hi = ath10k_ce_set_src_ring_base_addr_hi,
1854 	.ce_set_dest_ring_base_addr_hi = ath10k_ce_set_dest_ring_base_addr_hi,
1855 	.ce_completed_send_next_nolock = _ath10k_ce_completed_send_next_nolock_64,
1856 };
1857 
1858 static void ath10k_ce_set_ops(struct ath10k *ar,
1859 			      struct ath10k_ce_pipe *ce_state)
1860 {
1861 	switch (ar->hw_rev) {
1862 	case ATH10K_HW_WCN3990:
1863 		ce_state->ops = &ce_64_ops;
1864 		break;
1865 	default:
1866 		ce_state->ops = &ce_ops;
1867 		break;
1868 	}
1869 }
1870 
1871 int ath10k_ce_alloc_pipe(struct ath10k *ar, int ce_id,
1872 			 const struct ce_attr *attr)
1873 {
1874 	struct ath10k_ce *ce = ath10k_ce_priv(ar);
1875 	struct ath10k_ce_pipe *ce_state = &ce->ce_states[ce_id];
1876 	int ret;
1877 
1878 	ath10k_ce_set_ops(ar, ce_state);
1879 	/* Make sure there are enough CE ring buffer entries for HTT TX to
1880 	 * avoid additional TX locking checks.
1881 	 *
1882 	 * For lack of a better place, the check is done here.
1883 	 */
1884 	BUILD_BUG_ON(2 * TARGET_NUM_MSDU_DESC >
1885 		     (CE_HTT_H2T_MSG_SRC_NENTRIES - 1));
1886 	BUILD_BUG_ON(2 * TARGET_10_4_NUM_MSDU_DESC_PFC >
1887 		     (CE_HTT_H2T_MSG_SRC_NENTRIES - 1));
1888 	BUILD_BUG_ON(2 * TARGET_TLV_NUM_MSDU_DESC >
1889 		     (CE_HTT_H2T_MSG_SRC_NENTRIES - 1));
1890 
1891 	ce_state->ar = ar;
1892 	ce_state->id = ce_id;
1893 	ce_state->ctrl_addr = ath10k_ce_base_address(ar, ce_id);
1894 	ce_state->attr_flags = attr->flags;
1895 	ce_state->src_sz_max = attr->src_sz_max;
1896 
1897 	if (attr->src_nentries)
1898 		ce_state->send_cb = attr->send_cb;
1899 
1900 	if (attr->dest_nentries)
1901 		ce_state->recv_cb = attr->recv_cb;
1902 
1903 	if (attr->src_nentries) {
1904 		ce_state->src_ring =
1905 			ce_state->ops->ce_alloc_src_ring(ar, ce_id, attr);
1906 		if (IS_ERR(ce_state->src_ring)) {
1907 			ret = PTR_ERR(ce_state->src_ring);
1908 			ath10k_err(ar, "failed to alloc CE src ring %d: %d\n",
1909 				   ce_id, ret);
1910 			ce_state->src_ring = NULL;
1911 			return ret;
1912 		}
1913 	}
1914 
1915 	if (attr->dest_nentries) {
1916 		ce_state->dest_ring = ce_state->ops->ce_alloc_dst_ring(ar,
1917 									ce_id,
1918 									attr);
1919 		if (IS_ERR(ce_state->dest_ring)) {
1920 			ret = PTR_ERR(ce_state->dest_ring);
1921 			ath10k_err(ar, "failed to alloc CE dest ring %d: %d\n",
1922 				   ce_id, ret);
1923 			ce_state->dest_ring = NULL;
1924 			return ret;
1925 		}
1926 	}
1927 
1928 	return 0;
1929 }
1930 EXPORT_SYMBOL(ath10k_ce_alloc_pipe);
1931 
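/*
 * Allocate the "ring read index" (RRI) area: one u32 per copy engine in
 * coherent DMA memory. Its address is programmed into the CE RRI registers
 * and the index-update bit is set in each CE's CTRL1 register, allowing
 * ring read indices to be fetched from host memory rather than via
 * register reads.
 */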
1932 void ath10k_ce_alloc_rri(struct ath10k *ar)
1933 {
1934 	int i;
1935 	u32 value;
1936 	u32 ctrl1_regs;
1937 	u32 ce_base_addr;
1938 	struct ath10k_ce *ce = ath10k_ce_priv(ar);
1939 
1940 	ce->vaddr_rri = dma_alloc_coherent(ar->dev,
1941 					   (CE_COUNT * sizeof(u32)),
1942 					   &ce->paddr_rri, GFP_KERNEL);
1943 
1944 	if (!ce->vaddr_rri)
1945 		return;
1946 
1947 	ath10k_ce_write32(ar, ar->hw_ce_regs->ce_rri_low,
1948 			  lower_32_bits(ce->paddr_rri));
1949 	ath10k_ce_write32(ar, ar->hw_ce_regs->ce_rri_high,
1950 			  (upper_32_bits(ce->paddr_rri) &
1951 			  CE_DESC_ADDR_HI_MASK));
1952 
1953 	for (i = 0; i < CE_COUNT; i++) {
1954 		ctrl1_regs = ar->hw_ce_regs->ctrl1_regs->addr;
1955 		ce_base_addr = ath10k_ce_base_address(ar, i);
1956 		value = ath10k_ce_read32(ar, ce_base_addr + ctrl1_regs);
1957 		value |= ar->hw_ce_regs->upd->mask;
1958 		ath10k_ce_write32(ar, ce_base_addr + ctrl1_regs, value);
1959 	}
1960 }
1961 EXPORT_SYMBOL(ath10k_ce_alloc_rri);
1962 
1963 void ath10k_ce_free_rri(struct ath10k *ar)
1964 {
1965 	struct ath10k_ce *ce = ath10k_ce_priv(ar);
1966 
1967 	dma_free_coherent(ar->dev, (CE_COUNT * sizeof(u32)),
1968 			  ce->vaddr_rri,
1969 			  ce->paddr_rri);
1970 }
1971 EXPORT_SYMBOL(ath10k_ce_free_rri);
1972