// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) ST-Ericsson SA 2007-2010
 * Author: Per Forlin <per.forlin@stericsson.com> for ST-Ericsson
 * Author: Jonas Aaberg <jonas.aberg@stericsson.com> for ST-Ericsson
 */

#include <linux/kernel.h>
#include <linux/dmaengine.h>

#include "ste_dma40.h"
#include "ste_dma40_ll.h"

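/* Translate a DMA engine bus width into the DMA40 element size (ESIZE) field value */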
static u8 d40_width_to_bits(enum dma_slave_buswidth width)
{
	if (width == DMA_SLAVE_BUSWIDTH_1_BYTE)
		return STEDMA40_ESIZE_8_BIT;
	else if (width == DMA_SLAVE_BUSWIDTH_2_BYTES)
		return STEDMA40_ESIZE_16_BIT;
	else if (width == DMA_SLAVE_BUSWIDTH_8_BYTES)
		return STEDMA40_ESIZE_64_BIT;
	else
		return STEDMA40_ESIZE_32_BIT;
}

/* Sets up the proper LCSP1 and LCSP3 registers for a logical channel */
void d40_log_cfg(struct stedma40_chan_cfg *cfg,
		 u32 *lcsp1, u32 *lcsp3)
{
	u32 l3 = 0; /* dst */
	u32 l1 = 0; /* src */

	/* src is mem? -> increase address pos */
	if (cfg->dir == DMA_MEM_TO_DEV ||
	    cfg->dir == DMA_MEM_TO_MEM)
		l1 |= BIT(D40_MEM_LCSP1_SCFG_INCR_POS);

	/* dst is mem? -> increase address pos */
	if (cfg->dir == DMA_DEV_TO_MEM ||
	    cfg->dir == DMA_MEM_TO_MEM)
		l3 |= BIT(D40_MEM_LCSP3_DCFG_INCR_POS);

	/* src is hw? -> master port 1 */
	if (cfg->dir == DMA_DEV_TO_MEM ||
	    cfg->dir == DMA_DEV_TO_DEV)
		l1 |= BIT(D40_MEM_LCSP1_SCFG_MST_POS);

	/* dst is hw? -> master port 1 */
	if (cfg->dir == DMA_MEM_TO_DEV ||
	    cfg->dir == DMA_DEV_TO_DEV)
		l3 |= BIT(D40_MEM_LCSP3_DCFG_MST_POS);

	l3 |= BIT(D40_MEM_LCSP3_DCFG_EIM_POS);
	l3 |= cfg->dst_info.psize << D40_MEM_LCSP3_DCFG_PSIZE_POS;
	l3 |= d40_width_to_bits(cfg->dst_info.data_width)
		<< D40_MEM_LCSP3_DCFG_ESIZE_POS;

	l1 |= BIT(D40_MEM_LCSP1_SCFG_EIM_POS);
	l1 |= cfg->src_info.psize << D40_MEM_LCSP1_SCFG_PSIZE_POS;
	l1 |= d40_width_to_bits(cfg->src_info.data_width)
		<< D40_MEM_LCSP1_SCFG_ESIZE_POS;

	*lcsp1 = l1;
	*lcsp3 = l3;
}

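/* Sets up the source and destination configuration register values for a physical channel */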
void d40_phy_cfg(struct stedma40_chan_cfg *cfg, u32 *src_cfg, u32 *dst_cfg)
{
	u32 src = 0;
	u32 dst = 0;

	if ((cfg->dir == DMA_DEV_TO_MEM) ||
	    (cfg->dir == DMA_DEV_TO_DEV)) {
		/* Set master port to 1 */
		src |= BIT(D40_SREG_CFG_MST_POS);
		src |= D40_TYPE_TO_EVENT(cfg->dev_type);

		if (cfg->src_info.flow_ctrl == STEDMA40_NO_FLOW_CTRL)
			src |= BIT(D40_SREG_CFG_PHY_TM_POS);
		else
			src |= 3 << D40_SREG_CFG_PHY_TM_POS;
	}
	if ((cfg->dir == DMA_MEM_TO_DEV) ||
	    (cfg->dir == DMA_DEV_TO_DEV)) {
		/* Set master port to 1 */
		dst |= BIT(D40_SREG_CFG_MST_POS);
		dst |= D40_TYPE_TO_EVENT(cfg->dev_type);

		if (cfg->dst_info.flow_ctrl == STEDMA40_NO_FLOW_CTRL)
			dst |= BIT(D40_SREG_CFG_PHY_TM_POS);
		else
			dst |= 3 << D40_SREG_CFG_PHY_TM_POS;
	}
	/* Interrupt on end of transfer for destination */
	dst |= BIT(D40_SREG_CFG_TIM_POS);

	/* Generate interrupt on error */
	src |= BIT(D40_SREG_CFG_EIM_POS);
	dst |= BIT(D40_SREG_CFG_EIM_POS);

	/* PSIZE */
	if (cfg->src_info.psize != STEDMA40_PSIZE_PHY_1) {
		src |= BIT(D40_SREG_CFG_PHY_PEN_POS);
		src |= cfg->src_info.psize << D40_SREG_CFG_PSIZE_POS;
	}
	if (cfg->dst_info.psize != STEDMA40_PSIZE_PHY_1) {
		dst |= BIT(D40_SREG_CFG_PHY_PEN_POS);
		dst |= cfg->dst_info.psize << D40_SREG_CFG_PSIZE_POS;
	}

	/* Element size */
	src |= d40_width_to_bits(cfg->src_info.data_width)
		<< D40_SREG_CFG_ESIZE_POS;
	dst |= d40_width_to_bits(cfg->dst_info.data_width)
		<< D40_SREG_CFG_ESIZE_POS;

	/* Set the priority bit to high for the physical channel */
	if (cfg->high_priority) {
		src |= BIT(D40_SREG_CFG_PRI_POS);
		dst |= BIT(D40_SREG_CFG_PRI_POS);
	}

	if (cfg->src_info.big_endian)
		src |= BIT(D40_SREG_CFG_LBE_POS);
	if (cfg->dst_info.big_endian)
		dst |= BIT(D40_SREG_CFG_LBE_POS);

	*src_cfg = src;
	*dst_cfg = dst;
}

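/* Fill in one physical channel LLI (linked list item) describing a single transfer chunk */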
static int d40_phy_fill_lli(struct d40_phy_lli *lli,
			    dma_addr_t data,
			    u32 data_size,
			    dma_addr_t next_lli,
			    u32 reg_cfg,
			    struct stedma40_half_channel_info *info,
			    unsigned int flags)
{
	bool addr_inc = flags & LLI_ADDR_INC;
	bool term_int = flags & LLI_TERM_INT;
	unsigned int data_width = info->data_width;
	int psize = info->psize;
	int num_elems;

	if (psize == STEDMA40_PSIZE_PHY_1)
		num_elems = 1;
	else
		num_elems = 2 << psize;

	/* The address must be aligned to the element size */
	if (!IS_ALIGNED(data, data_width))
		return -EINVAL;

	/* The transfer size can't be smaller than (num_elems * data_width) */
	if (data_size < num_elems * data_width)
		return -EINVAL;

	/* The number of elements, i.e. how many data chunks to transfer */
	lli->reg_elt = (data_size / data_width) << D40_SREG_ELEM_PHY_ECNT_POS;

	/*
	 * Distance to the next element-sized entry.
	 * Usually the size of the element unless you want gaps.
	 */
	if (addr_inc)
		lli->reg_elt |= data_width << D40_SREG_ELEM_PHY_EIDX_POS;

	/* Where the data is */
	lli->reg_ptr = data;
	lli->reg_cfg = reg_cfg;

	/* If this scatterlist entry is the last one, there is no next link */
	if (next_lli == 0)
		lli->reg_lnk = BIT(D40_SREG_LNK_PHY_TCP_POS);
	else
		lli->reg_lnk = next_lli;

	/* Set/clear interrupt generation on this link item. */
	if (term_int)
		lli->reg_cfg |= BIT(D40_SREG_CFG_TIM_POS);
	else
		lli->reg_cfg &= ~BIT(D40_SREG_CFG_TIM_POS);

	/*
	 * Post link - D40_SREG_LNK_PHY_PRE_POS = 0
	 * Relinking happens after transfer completion.
	 */

	return 0;
}

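/*
 * Work out how many bytes the next LLI may cover: neither side of the
 * transfer may exceed STEDMA40_MAX_SEG_SIZE elements, and split segments
 * are kept aligned to the wider of the two bus widths.
 */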
static int d40_seg_size(int size, int data_width1, int data_width2)
{
	u32 max_w = max(data_width1, data_width2);
	u32 min_w = min(data_width1, data_width2);
	u32 seg_max = ALIGN(STEDMA40_MAX_SEG_SIZE * min_w, max_w);

	if (seg_max > STEDMA40_MAX_SEG_SIZE)
		seg_max -= max_w;

	if (size <= seg_max)
		return size;

	if (size <= 2 * seg_max)
		return ALIGN(size / 2, max_w);

	return seg_max;
}

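/*
 * Split one contiguous buffer into as many physical LLIs as needed and link
 * them together; returns a pointer past the last LLI written, or NULL on error.
 */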
static struct d40_phy_lli *
d40_phy_buf_to_lli(struct d40_phy_lli *lli, dma_addr_t addr, u32 size,
		   dma_addr_t lli_phys, dma_addr_t first_phys, u32 reg_cfg,
		   struct stedma40_half_channel_info *info,
		   struct stedma40_half_channel_info *otherinfo,
		   unsigned long flags)
{
	bool lastlink = flags & LLI_LAST_LINK;
	bool addr_inc = flags & LLI_ADDR_INC;
	bool term_int = flags & LLI_TERM_INT;
	bool cyclic = flags & LLI_CYCLIC;
	int err;
	dma_addr_t next = lli_phys;
	int size_rest = size;
	int size_seg = 0;

	/*
	 * This piece may be split up based on d40_seg_size(); we only want the
	 * term int on the last part.
	 */
	if (term_int)
		flags &= ~LLI_TERM_INT;

	do {
		size_seg = d40_seg_size(size_rest, info->data_width,
					otherinfo->data_width);
		size_rest -= size_seg;

		if (size_rest == 0 && term_int)
			flags |= LLI_TERM_INT;

		if (size_rest == 0 && lastlink)
			next = cyclic ? first_phys : 0;
		else
			next = ALIGN(next + sizeof(struct d40_phy_lli),
				     D40_LLI_ALIGN);

		err = d40_phy_fill_lli(lli, addr, size_seg, next,
				       reg_cfg, info, flags);

		if (err)
			goto err;

		lli++;
		if (addr_inc)
			addr += size_seg;
	} while (size_rest);

	return lli;

 err:
	return NULL;
}

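/*
 * Translate a scatterlist into a chain of physical LLIs. @target is the fixed
 * device address for dev<->mem transfers, or 0 to take the addresses from the
 * scatterlist. Returns the total number of bytes mapped, or -EINVAL on error.
 */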
int d40_phy_sg_to_lli(struct scatterlist *sg,
		      int sg_len,
		      dma_addr_t target,
		      struct d40_phy_lli *lli_sg,
		      dma_addr_t lli_phys,
		      u32 reg_cfg,
		      struct stedma40_half_channel_info *info,
		      struct stedma40_half_channel_info *otherinfo,
		      unsigned long flags)
{
	int total_size = 0;
	int i;
	struct scatterlist *current_sg = sg;
	struct d40_phy_lli *lli = lli_sg;
	dma_addr_t l_phys = lli_phys;

	if (!target)
		flags |= LLI_ADDR_INC;

	for_each_sg(sg, current_sg, sg_len, i) {
		dma_addr_t sg_addr = sg_dma_address(current_sg);
		unsigned int len = sg_dma_len(current_sg);
		dma_addr_t dst = target ?: sg_addr;

		total_size += sg_dma_len(current_sg);

		if (i == sg_len - 1)
			flags |= LLI_TERM_INT | LLI_LAST_LINK;

		l_phys = ALIGN(lli_phys + (lli - lli_sg) *
			       sizeof(struct d40_phy_lli), D40_LLI_ALIGN);

		lli = d40_phy_buf_to_lli(lli, dst, len, l_phys, lli_phys,
					 reg_cfg, info, otherinfo, flags);

		if (lli == NULL)
			return -EINVAL;
	}

	return total_size;
}

/* DMA logical lli operations */

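/*
 * Link a src/dst pair of logical LLIs to the next pair and, when requested,
 * enable the terminal count interrupt on the destination.
 */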
static void d40_log_lli_link(struct d40_log_lli *lli_dst,
			     struct d40_log_lli *lli_src,
			     int next, unsigned int flags)
{
	bool interrupt = flags & LLI_TERM_INT;
	u32 slos = 0;
	u32 dlos = 0;

	if (next != -EINVAL) {
		slos = next * 2;
		dlos = next * 2 + 1;
	}

	if (interrupt) {
		lli_dst->lcsp13 |= D40_MEM_LCSP1_SCFG_TIM_MASK;
		lli_dst->lcsp13 |= D40_MEM_LCSP3_DTCP_MASK;
	}

	lli_src->lcsp13 = (lli_src->lcsp13 & ~D40_MEM_LCSP1_SLOS_MASK) |
		(slos << D40_MEM_LCSP1_SLOS_POS);

	lli_dst->lcsp13 = (lli_dst->lcsp13 & ~D40_MEM_LCSP1_SLOS_MASK) |
		(dlos << D40_MEM_LCSP1_SLOS_POS);
}

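/* Write a linked src/dst logical LLI pair directly into the channel's LCPA entry */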
void d40_log_lli_lcpa_write(struct d40_log_lli_full *lcpa,
			    struct d40_log_lli *lli_dst,
			    struct d40_log_lli *lli_src,
			    int next, unsigned int flags)
{
	d40_log_lli_link(lli_dst, lli_src, next, flags);

	writel_relaxed(lli_src->lcsp02, &lcpa[0].lcsp0);
	writel_relaxed(lli_src->lcsp13, &lcpa[0].lcsp1);
	writel_relaxed(lli_dst->lcsp02, &lcpa[0].lcsp2);
	writel_relaxed(lli_dst->lcsp13, &lcpa[0].lcsp3);
}

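/* Write a linked src/dst logical LLI pair into two consecutive LCLA entries */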
void d40_log_lli_lcla_write(struct d40_log_lli *lcla,
			    struct d40_log_lli *lli_dst,
			    struct d40_log_lli *lli_src,
			    int next, unsigned int flags)
{
	d40_log_lli_link(lli_dst, lli_src, next, flags);

	writel_relaxed(lli_src->lcsp02, &lcla[0].lcsp02);
	writel_relaxed(lli_src->lcsp13, &lcla[0].lcsp13);
	writel_relaxed(lli_dst->lcsp02, &lcla[1].lcsp02);
	writel_relaxed(lli_dst->lcsp13, &lcla[1].lcsp13);
}

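/* Fill in one logical LLI describing a single chunk of at most STEDMA40_MAX_SEG_SIZE elements */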
static void d40_log_fill_lli(struct d40_log_lli *lli,
			     dma_addr_t data, u32 data_size,
			     u32 reg_cfg,
			     u32 data_width,
			     unsigned int flags)
{
	bool addr_inc = flags & LLI_ADDR_INC;

	lli->lcsp13 = reg_cfg;

	/* The number of elements to transfer */
	lli->lcsp02 = ((data_size / data_width) <<
		       D40_MEM_LCSP0_ECNT_POS) & D40_MEM_LCSP0_ECNT_MASK;

	BUG_ON((data_size / data_width) > STEDMA40_MAX_SEG_SIZE);

	/* 16 LSBs of the address of the current element */
	lli->lcsp02 |= data & D40_MEM_LCSP0_SPTR_MASK;
	/* 16 MSBs of the address of the current element */
	lli->lcsp13 |= data & D40_MEM_LCSP1_SPTR_MASK;

	if (addr_inc)
		lli->lcsp13 |= D40_MEM_LCSP1_SCFG_INCR_MASK;
}

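/*
 * Split one contiguous buffer into as many logical LLIs as needed; returns a
 * pointer past the last LLI written.
 */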
static struct d40_log_lli *d40_log_buf_to_lli(struct d40_log_lli *lli_sg,
					      dma_addr_t addr,
					      int size,
					      u32 lcsp13, /* src or dst */
					      u32 data_width1,
					      u32 data_width2,
					      unsigned int flags)
{
	bool addr_inc = flags & LLI_ADDR_INC;
	struct d40_log_lli *lli = lli_sg;
	int size_rest = size;
	int size_seg = 0;

	do {
		size_seg = d40_seg_size(size_rest, data_width1, data_width2);
		size_rest -= size_seg;

		d40_log_fill_lli(lli,
				 addr,
				 size_seg,
				 lcsp13, data_width1,
				 flags);
		if (addr_inc)
			addr += size_seg;
		lli++;
	} while (size_rest);

	return lli;
}

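/*
 * Translate a scatterlist into a chain of logical LLIs. @dev_addr is the fixed
 * device address for dev<->mem transfers, or 0 to take the addresses from the
 * scatterlist. Returns the total number of bytes mapped.
 */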
int d40_log_sg_to_lli(struct scatterlist *sg,
		      int sg_len,
		      dma_addr_t dev_addr,
		      struct d40_log_lli *lli_sg,
		      u32 lcsp13, /* src or dst */
		      u32 data_width1, u32 data_width2)
{
	int total_size = 0;
	struct scatterlist *current_sg = sg;
	int i;
	struct d40_log_lli *lli = lli_sg;
	unsigned long flags = 0;

	if (!dev_addr)
		flags |= LLI_ADDR_INC;

	for_each_sg(sg, current_sg, sg_len, i) {
		dma_addr_t sg_addr = sg_dma_address(current_sg);
		unsigned int len = sg_dma_len(current_sg);
		dma_addr_t addr = dev_addr ?: sg_addr;

		total_size += sg_dma_len(current_sg);

		lli = d40_log_buf_to_lli(lli, addr, len,
					 lcsp13,
					 data_width1,
					 data_width2,
					 flags);
	}

	return total_size;
}