1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3 * STM32 DMA3 controller driver
4 *
5 * Copyright (C) STMicroelectronics 2024
6 * Author(s): Amelie Delaunay <amelie.delaunay@foss.st.com>
7 */
8
9 #include <linux/bitfield.h>
10 #include <linux/clk.h>
11 #include <linux/dma-mapping.h>
12 #include <linux/dmaengine.h>
13 #include <linux/dmapool.h>
14 #include <linux/init.h>
15 #include <linux/iopoll.h>
16 #include <linux/list.h>
17 #include <linux/module.h>
18 #include <linux/of_dma.h>
19 #include <linux/platform_device.h>
20 #include <linux/pm_runtime.h>
21 #include <linux/reset.h>
22 #include <linux/slab.h>
23
24 #include "../virt-dma.h"
25
26 #define STM32_DMA3_SECCFGR 0x00
27 #define STM32_DMA3_PRIVCFGR 0x04
28 #define STM32_DMA3_RCFGLOCKR 0x08
29 #define STM32_DMA3_MISR 0x0c
30 #define STM32_DMA3_SMISR 0x10
31
32 #define STM32_DMA3_CLBAR(x) (0x50 + 0x80 * (x))
33 #define STM32_DMA3_CCIDCFGR(x) (0x54 + 0x80 * (x))
34 #define STM32_DMA3_CSEMCR(x) (0x58 + 0x80 * (x))
35 #define STM32_DMA3_CFCR(x) (0x5c + 0x80 * (x))
36 #define STM32_DMA3_CSR(x) (0x60 + 0x80 * (x))
37 #define STM32_DMA3_CCR(x) (0x64 + 0x80 * (x))
38 #define STM32_DMA3_CTR1(x) (0x90 + 0x80 * (x))
39 #define STM32_DMA3_CTR2(x) (0x94 + 0x80 * (x))
40 #define STM32_DMA3_CBR1(x) (0x98 + 0x80 * (x))
41 #define STM32_DMA3_CSAR(x) (0x9c + 0x80 * (x))
42 #define STM32_DMA3_CDAR(x) (0xa0 + 0x80 * (x))
43 #define STM32_DMA3_CLLR(x) (0xcc + 0x80 * (x))
44
45 #define STM32_DMA3_HWCFGR13 0xfc0 /* G_PER_CTRL(X) x=8..15 */
46 #define STM32_DMA3_HWCFGR12 0xfc4 /* G_PER_CTRL(X) x=0..7 */
47 #define STM32_DMA3_HWCFGR4 0xfe4 /* G_FIFO_SIZE(X) x=8..15 */
48 #define STM32_DMA3_HWCFGR3 0xfe8 /* G_FIFO_SIZE(X) x=0..7 */
49 #define STM32_DMA3_HWCFGR2 0xfec /* G_MAX_REQ_ID */
50 #define STM32_DMA3_HWCFGR1 0xff0 /* G_MASTER_PORTS, G_NUM_CHANNELS, G_Mx_DATA_WIDTH */
51 #define STM32_DMA3_VERR 0xff4
52
53 /* SECCFGR DMA secure configuration register */
54 #define SECCFGR_SEC(x) BIT(x)
55
56 /* MISR DMA non-secure/secure masked interrupt status register */
57 #define MISR_MIS(x) BIT(x)
58
59 /* CxLBAR DMA channel x linked_list base address register */
60 #define CLBAR_LBA GENMASK(31, 16)
61
62 /* CxCIDCFGR DMA channel x CID register */
63 #define CCIDCFGR_CFEN BIT(0)
64 #define CCIDCFGR_SEM_EN BIT(1)
65 #define CCIDCFGR_SCID GENMASK(5, 4)
66 #define CCIDCFGR_SEM_WLIST_CID0 BIT(16)
67 #define CCIDCFGR_SEM_WLIST_CID1 BIT(17)
68 #define CCIDCFGR_SEM_WLIST_CID2 BIT(18)
69
/* Compartment IDs usable in CCIDCFGR_SCID and the SEM_WLIST_CIDx white-list bits */
enum ccidcfgr_cid {
	CCIDCFGR_CID0,
	CCIDCFGR_CID1,
	CCIDCFGR_CID2,
};
75
76 /* CxSEMCR DMA channel x semaphore control register */
77 #define CSEMCR_SEM_MUTEX BIT(0)
78 #define CSEMCR_SEM_CCID GENMASK(5, 4)
79
80 /* CxFCR DMA channel x flag clear register */
81 #define CFCR_TCF BIT(8)
82 #define CFCR_HTF BIT(9)
83 #define CFCR_DTEF BIT(10)
84 #define CFCR_ULEF BIT(11)
85 #define CFCR_USEF BIT(12)
86 #define CFCR_SUSPF BIT(13)
87
88 /* CxSR DMA channel x status register */
89 #define CSR_IDLEF BIT(0)
90 #define CSR_TCF BIT(8)
91 #define CSR_HTF BIT(9)
92 #define CSR_DTEF BIT(10)
93 #define CSR_ULEF BIT(11)
94 #define CSR_USEF BIT(12)
95 #define CSR_SUSPF BIT(13)
96 #define CSR_ALL_F GENMASK(13, 8)
97 #define CSR_FIFOL GENMASK(24, 16)
98
99 /* CxCR DMA channel x control register */
100 #define CCR_EN BIT(0)
101 #define CCR_RESET BIT(1)
102 #define CCR_SUSP BIT(2)
103 #define CCR_TCIE BIT(8)
104 #define CCR_HTIE BIT(9)
105 #define CCR_DTEIE BIT(10)
106 #define CCR_ULEIE BIT(11)
107 #define CCR_USEIE BIT(12)
108 #define CCR_SUSPIE BIT(13)
109 #define CCR_ALLIE GENMASK(13, 8)
110 #define CCR_LSM BIT(16)
111 #define CCR_LAP BIT(17)
112 #define CCR_PRIO GENMASK(23, 22)
113
/* Channel priority encodings for the CCR_PRIO field */
enum ccr_prio {
	CCR_PRIO_LOW,
	CCR_PRIO_MID,
	CCR_PRIO_HIGH,
	CCR_PRIO_VERY_HIGH,
};
120
121 /* CxTR1 DMA channel x transfer register 1 */
122 #define CTR1_SINC BIT(3)
123 #define CTR1_SBL_1 GENMASK(9, 4)
124 #define CTR1_DINC BIT(19)
125 #define CTR1_DBL_1 GENMASK(25, 20)
126 #define CTR1_SDW_LOG2 GENMASK(1, 0)
127 #define CTR1_PAM GENMASK(12, 11)
128 #define CTR1_SAP BIT(14)
129 #define CTR1_DDW_LOG2 GENMASK(17, 16)
130 #define CTR1_DAP BIT(30)
131
132 enum ctr1_dw {
133 CTR1_DW_BYTE,
134 CTR1_DW_HWORD,
135 CTR1_DW_WORD,
136 CTR1_DW_DWORD, /* Depends on HWCFGR1.G_M0_DATA_WIDTH_ENC and .G_M1_DATA_WIDTH_ENC */
137 };
138
139 enum ctr1_pam {
140 CTR1_PAM_0S_LT, /* if DDW > SDW, padded with 0s else left-truncated */
141 CTR1_PAM_SE_RT, /* if DDW > SDW, sign extended else right-truncated */
142 CTR1_PAM_PACK_UNPACK, /* FIFO queued */
143 };
144
145 /* CxTR2 DMA channel x transfer register 2 */
146 #define CTR2_REQSEL GENMASK(7, 0)
147 #define CTR2_SWREQ BIT(9)
148 #define CTR2_DREQ BIT(10)
149 #define CTR2_BREQ BIT(11)
150 #define CTR2_PFREQ BIT(12)
151 #define CTR2_TCEM GENMASK(31, 30)
152
/* Transfer Complete Event Mode (CTR2_TCEM): selects on which event the TC flag is raised */
enum ctr2_tcem {
	CTR2_TCEM_BLOCK,	/* at block level */
	CTR2_TCEM_REPEAT_BLOCK,	/* at repeated-block level */
	CTR2_TCEM_LLI,		/* at each linked-list item */
	CTR2_TCEM_CHANNEL,	/* at the end of the whole channel transfer */
};
159
160 /* CxBR1 DMA channel x block register 1 */
161 #define CBR1_BNDT GENMASK(15, 0)
162
163 /* CxLLR DMA channel x linked-list address register */
164 #define CLLR_LA GENMASK(15, 2)
165 #define CLLR_ULL BIT(16)
166 #define CLLR_UDA BIT(27)
167 #define CLLR_USA BIT(28)
168 #define CLLR_UB1 BIT(29)
169 #define CLLR_UT2 BIT(30)
170 #define CLLR_UT1 BIT(31)
171
172 /* HWCFGR13 DMA hardware configuration register 13 x=8..15 */
173 /* HWCFGR12 DMA hardware configuration register 12 x=0..7 */
174 #define G_PER_CTRL(x) (ULL(0x1) << (4 * (x)))
175
176 /* HWCFGR4 DMA hardware configuration register 4 x=8..15 */
177 /* HWCFGR3 DMA hardware configuration register 3 x=0..7 */
178 #define G_FIFO_SIZE(x) (ULL(0x7) << (4 * (x)))
179
180 #define get_chan_hwcfg(x, mask, reg) (((reg) & (mask)) >> (4 * (x)))
181
182 /* HWCFGR2 DMA hardware configuration register 2 */
183 #define G_MAX_REQ_ID GENMASK(7, 0)
184
185 /* HWCFGR1 DMA hardware configuration register 1 */
186 #define G_MASTER_PORTS GENMASK(2, 0)
187 #define G_NUM_CHANNELS GENMASK(12, 8)
188 #define G_M0_DATA_WIDTH_ENC GENMASK(25, 24)
189 #define G_M1_DATA_WIDTH_ENC GENMASK(29, 28)
190
191 enum stm32_dma3_master_ports {
192 AXI64, /* 1x AXI: 64-bit port 0 */
193 AHB32, /* 1x AHB: 32-bit port 0 */
194 AHB32_AHB32, /* 2x AHB: 32-bit port 0 and 32-bit port 1 */
195 AXI64_AHB32, /* 1x AXI 64-bit port 0 and 1x AHB 32-bit port 1 */
196 AXI64_AXI64, /* 2x AXI: 64-bit port 0 and 64-bit port 1 */
197 AXI128_AHB32, /* 1x AXI 128-bit port 0 and 1x AHB 32-bit port 1 */
198 };
199
200 enum stm32_dma3_port_data_width {
201 DW_32, /* 32-bit, for AHB */
202 DW_64, /* 64-bit, for AXI */
203 DW_128, /* 128-bit, for AXI */
204 DW_INVALID,
205 };
206
207 /* VERR DMA version register */
208 #define VERR_MINREV GENMASK(3, 0)
209 #define VERR_MAJREV GENMASK(7, 4)
210
211 /* Device tree */
212 /* struct stm32_dma3_dt_conf */
213 /* .ch_conf */
214 #define STM32_DMA3_DT_PRIO GENMASK(1, 0) /* CCR_PRIO */
215 #define STM32_DMA3_DT_FIFO GENMASK(7, 4)
216 /* .tr_conf */
217 #define STM32_DMA3_DT_SINC BIT(0) /* CTR1_SINC */
218 #define STM32_DMA3_DT_SAP BIT(1) /* CTR1_SAP */
219 #define STM32_DMA3_DT_DINC BIT(4) /* CTR1_DINC */
220 #define STM32_DMA3_DT_DAP BIT(5) /* CTR1_DAP */
221 #define STM32_DMA3_DT_BREQ BIT(8) /* CTR2_BREQ */
222 #define STM32_DMA3_DT_PFREQ BIT(9) /* CTR2_PFREQ */
223 #define STM32_DMA3_DT_TCEM GENMASK(13, 12) /* CTR2_TCEM */
224 #define STM32_DMA3_DT_NOPACK BIT(16) /* CTR1_PAM */
225 #define STM32_DMA3_DT_NOREFACT BIT(17)
226
227 /* struct stm32_dma3_chan .config_set bitfield */
228 #define STM32_DMA3_CFG_SET_DT BIT(0)
229 #define STM32_DMA3_CFG_SET_DMA BIT(1)
230 #define STM32_DMA3_CFG_SET_BOTH (STM32_DMA3_CFG_SET_DT | STM32_DMA3_CFG_SET_DMA)
231
232 #define STM32_DMA3_MAX_BLOCK_SIZE ALIGN_DOWN(CBR1_BNDT, 64)
233 #define STM32_DMA3_MAX_BURST_LEN (1 + min_t(u32, FIELD_MAX(CTR1_SBL_1), \
234 FIELD_MAX(CTR1_DBL_1)))
235 #define port_is_ahb(maxdw) ({ typeof(maxdw) (_maxdw) = (maxdw); \
236 ((_maxdw) != DW_INVALID) && ((_maxdw) == DW_32); })
237 #define port_is_axi(maxdw) ({ typeof(maxdw) (_maxdw) = (maxdw); \
238 ((_maxdw) != DW_INVALID) && ((_maxdw) != DW_32); })
239 #define get_chan_max_dw(maxdw, maxburst)((port_is_ahb(maxdw) || \
240 (maxburst) < DMA_SLAVE_BUSWIDTH_8_BYTES) ? \
241 DMA_SLAVE_BUSWIDTH_4_BYTES : DMA_SLAVE_BUSWIDTH_8_BYTES)
242
243 /* Static linked-list data structure (depends on update bits UT1/UT2/UB1/USA/UDA/ULL) */
struct stm32_dma3_hwdesc {
	u32 ctr1;	/* CxTR1: data widths, address increments, ports, packing mode */
	u32 ctr2;	/* CxTR2: request line, req type, TC event mode */
	u32 cbr1;	/* CxBR1: block size in bytes (BNDT) */
	u32 csar;	/* CxSAR: source address */
	u32 cdar;	/* CxDAR: destination address */
	u32 cllr;	/* CxLLR: next item address (CLLR_LA) + update bits, 0 if last */
} __packed __aligned(32);
252
253 /*
254 * CLLR_LA / sizeof(struct stm32_dma3_hwdesc) represents the number of hdwdesc that can be addressed
255 * by the pointer to the next linked-list data structure. The __aligned forces the 32-byte
256 * alignment. So use hardcoded 32. Multiplied by the max block size of each item, it represents
257 * the sg size limitation.
258 */
259 #define STM32_DMA3_MAX_SEG_SIZE ((CLLR_LA / 32) * STM32_DMA3_MAX_BLOCK_SIZE)
260
261 /*
262 * Linked-list items
263 */
struct stm32_dma3_lli {
	struct stm32_dma3_hwdesc *hwdesc;	/* CPU address of the item (from lli_pool) */
	dma_addr_t hwdesc_addr;			/* DMA (bus) address of the item */
};
268
/* Software descriptor: one dmaengine transaction, made of lli_size hardware items */
struct stm32_dma3_swdesc {
	struct virt_dma_desc vdesc;	/* virt-dma bookkeeping, embedded first */
	u32 ccr;			/* CCR value to program when starting this transfer */
	bool cyclic;			/* true when the last item links back to lli[0] */
	u32 lli_size;			/* number of entries in lli[] */
	struct stm32_dma3_lli lli[] __counted_by(lli_size);
};
276
/* Channel configuration decoded from the device tree dma cells */
struct stm32_dma3_dt_conf {
	u32 ch_id;	/* requested channel id */
	u32 req_line;	/* hardware request line, programmed in CTR2_REQSEL */
	u32 ch_conf;	/* STM32_DMA3_DT_PRIO / STM32_DMA3_DT_FIFO bitfield */
	u32 tr_conf;	/* STM32_DMA3_DT_SINC/SAP/DINC/DAP/... transfer bitfield */
};
283
struct stm32_dma3_chan {
	struct virt_dma_chan vchan;		/* virt-dma channel, embeds the dma_chan */
	u32 id;					/* channel index, used to address CxYYYR registers */
	int irq;
	u32 fifo_size;				/* from G_FIFO_SIZE() hw config — units per RM, TODO confirm */
	u32 max_burst;
	bool semaphore_mode;			/* channel protected by the CxSEMCR semaphore */
	bool semaphore_taken;
	struct stm32_dma3_dt_conf dt_config;	/* configuration from device tree */
	struct dma_slave_config dma_config;	/* configuration from dmaengine slave_config */
	u8 config_set;				/* STM32_DMA3_CFG_SET_* bitfield: which configs are valid */
	struct dma_pool *lli_pool;		/* pool of hardware linked-list items */
	struct stm32_dma3_swdesc *swdesc;	/* descriptor currently programmed on the channel */
	enum ctr2_tcem tcem;			/* TC event mode of the current transfer */
	u32 dma_status;				/* DMA_IN_PROGRESS / DMA_PAUSED / ... */
};
300
/* Platform-specific data, per compatible */
struct stm32_dma3_pdata {
	u32 axi_max_burst_len;	/* interconnect limitation on AXI burst length, in beats */
};
304
struct stm32_dma3_ddata {
	struct dma_device dma_dev;	/* dmaengine device, embedded (see to_stm32_dma3_ddata()) */
	void __iomem *base;		/* controller registers base address */
	struct clk *clk;
	struct stm32_dma3_chan *chans;	/* array of dma_channels channels */
	u32 dma_channels;		/* presumably from HWCFGR1 G_NUM_CHANNELS — set outside this chunk */
	u32 dma_requests;		/* presumably from HWCFGR2 G_MAX_REQ_ID — set outside this chunk */
	enum stm32_dma3_port_data_width ports_max_dw[2];	/* max data width of master ports 0/1 */
	u32 axi_max_burst_len;		/* AXI burst length limitation, applied in prep_hw */
};
315
to_stm32_dma3_ddata(struct stm32_dma3_chan * chan)316 static inline struct stm32_dma3_ddata *to_stm32_dma3_ddata(struct stm32_dma3_chan *chan)
317 {
318 return container_of(chan->vchan.chan.device, struct stm32_dma3_ddata, dma_dev);
319 }
320
to_stm32_dma3_chan(struct dma_chan * c)321 static inline struct stm32_dma3_chan *to_stm32_dma3_chan(struct dma_chan *c)
322 {
323 return container_of(c, struct stm32_dma3_chan, vchan.chan);
324 }
325
to_stm32_dma3_swdesc(struct virt_dma_desc * vdesc)326 static inline struct stm32_dma3_swdesc *to_stm32_dma3_swdesc(struct virt_dma_desc *vdesc)
327 {
328 return container_of(vdesc, struct stm32_dma3_swdesc, vdesc);
329 }
330
chan2dev(struct stm32_dma3_chan * chan)331 static struct device *chan2dev(struct stm32_dma3_chan *chan)
332 {
333 return &chan->vchan.chan.dev->device;
334 }
335
ddata2dev(struct stm32_dma3_ddata * ddata)336 static struct device *ddata2dev(struct stm32_dma3_ddata *ddata)
337 {
338 return ddata->dma_dev.dev;
339 }
340
stm32_dma3_chan_dump_reg(struct stm32_dma3_chan * chan)341 static void stm32_dma3_chan_dump_reg(struct stm32_dma3_chan *chan)
342 {
343 struct stm32_dma3_ddata *ddata = to_stm32_dma3_ddata(chan);
344 struct device *dev = chan2dev(chan);
345 u32 id = chan->id, offset;
346
347 offset = STM32_DMA3_SECCFGR;
348 dev_dbg(dev, "SECCFGR(0x%03x): %08x\n", offset, readl_relaxed(ddata->base + offset));
349 offset = STM32_DMA3_PRIVCFGR;
350 dev_dbg(dev, "PRIVCFGR(0x%03x): %08x\n", offset, readl_relaxed(ddata->base + offset));
351 offset = STM32_DMA3_CCIDCFGR(id);
352 dev_dbg(dev, "C%dCIDCFGR(0x%03x): %08x\n", id, offset, readl_relaxed(ddata->base + offset));
353 offset = STM32_DMA3_CSEMCR(id);
354 dev_dbg(dev, "C%dSEMCR(0x%03x): %08x\n", id, offset, readl_relaxed(ddata->base + offset));
355 offset = STM32_DMA3_CSR(id);
356 dev_dbg(dev, "C%dSR(0x%03x): %08x\n", id, offset, readl_relaxed(ddata->base + offset));
357 offset = STM32_DMA3_CCR(id);
358 dev_dbg(dev, "C%dCR(0x%03x): %08x\n", id, offset, readl_relaxed(ddata->base + offset));
359 offset = STM32_DMA3_CTR1(id);
360 dev_dbg(dev, "C%dTR1(0x%03x): %08x\n", id, offset, readl_relaxed(ddata->base + offset));
361 offset = STM32_DMA3_CTR2(id);
362 dev_dbg(dev, "C%dTR2(0x%03x): %08x\n", id, offset, readl_relaxed(ddata->base + offset));
363 offset = STM32_DMA3_CBR1(id);
364 dev_dbg(dev, "C%dBR1(0x%03x): %08x\n", id, offset, readl_relaxed(ddata->base + offset));
365 offset = STM32_DMA3_CSAR(id);
366 dev_dbg(dev, "C%dSAR(0x%03x): %08x\n", id, offset, readl_relaxed(ddata->base + offset));
367 offset = STM32_DMA3_CDAR(id);
368 dev_dbg(dev, "C%dDAR(0x%03x): %08x\n", id, offset, readl_relaxed(ddata->base + offset));
369 offset = STM32_DMA3_CLLR(id);
370 dev_dbg(dev, "C%dLLR(0x%03x): %08x\n", id, offset, readl_relaxed(ddata->base + offset));
371 offset = STM32_DMA3_CLBAR(id);
372 dev_dbg(dev, "C%dLBAR(0x%03x): %08x\n", id, offset, readl_relaxed(ddata->base + offset));
373 }
374
stm32_dma3_chan_dump_hwdesc(struct stm32_dma3_chan * chan,struct stm32_dma3_swdesc * swdesc)375 static void stm32_dma3_chan_dump_hwdesc(struct stm32_dma3_chan *chan,
376 struct stm32_dma3_swdesc *swdesc)
377 {
378 struct stm32_dma3_hwdesc *hwdesc;
379 int i;
380
381 for (i = 0; i < swdesc->lli_size; i++) {
382 hwdesc = swdesc->lli[i].hwdesc;
383 if (i)
384 dev_dbg(chan2dev(chan), "V\n");
385 dev_dbg(chan2dev(chan), "[%d]@%pad\n", i, &swdesc->lli[i].hwdesc_addr);
386 dev_dbg(chan2dev(chan), "| C%dTR1: %08x\n", chan->id, hwdesc->ctr1);
387 dev_dbg(chan2dev(chan), "| C%dTR2: %08x\n", chan->id, hwdesc->ctr2);
388 dev_dbg(chan2dev(chan), "| C%dBR1: %08x\n", chan->id, hwdesc->cbr1);
389 dev_dbg(chan2dev(chan), "| C%dSAR: %08x\n", chan->id, hwdesc->csar);
390 dev_dbg(chan2dev(chan), "| C%dDAR: %08x\n", chan->id, hwdesc->cdar);
391 dev_dbg(chan2dev(chan), "| C%dLLR: %08x\n", chan->id, hwdesc->cllr);
392 }
393
394 if (swdesc->cyclic) {
395 dev_dbg(chan2dev(chan), "|\n");
396 dev_dbg(chan2dev(chan), "-->[0]@%pad\n", &swdesc->lli[0].hwdesc_addr);
397 } else {
398 dev_dbg(chan2dev(chan), "X\n");
399 }
400 }
401
/*
 * Allocate a software descriptor and its @count hardware linked-list items from
 * the channel LLI pool, then program the linked-list base address register.
 * Returns NULL on error; on success the caller owns the descriptor and must
 * release it with stm32_dma3_chan_desc_free().
 */
static struct stm32_dma3_swdesc *stm32_dma3_chan_desc_alloc(struct stm32_dma3_chan *chan, u32 count)
{
	struct stm32_dma3_ddata *ddata = to_stm32_dma3_ddata(chan);
	struct stm32_dma3_swdesc *swdesc;
	int i;

	/*
	 * If the memory to be allocated for the number of hwdesc (6 u32 members but 32-bytes
	 * aligned) is greater than the maximum address of CLLR_LA, then the last items can't be
	 * addressed, so abort the allocation.
	 */
	if ((count * 32) > CLLR_LA) {
		dev_err(chan2dev(chan), "Transfer is too big (> %luB)\n", STM32_DMA3_MAX_SEG_SIZE);
		return NULL;
	}

	/* Zeroed allocation of swdesc plus its count-sized flexible lli[] array */
	swdesc = kzalloc_flex(*swdesc, lli, count, GFP_NOWAIT);
	if (!swdesc)
		return NULL;
	swdesc->lli_size = count;

	for (i = 0; i < count; i++) {
		swdesc->lli[i].hwdesc = dma_pool_zalloc(chan->lli_pool, GFP_NOWAIT,
							&swdesc->lli[i].hwdesc_addr);
		if (!swdesc->lli[i].hwdesc)
			goto err_pool_free;
	}
	swdesc->ccr = 0;

	/* Set LL base address: CLBAR holds the upper bits shared by all items */
	writel_relaxed(swdesc->lli[0].hwdesc_addr & CLBAR_LBA,
		       ddata->base + STM32_DMA3_CLBAR(chan->id));

	/* Set LL allocated port: CCR_LAP cleared selects port 0 for LL fetches */
	swdesc->ccr &= ~CCR_LAP;

	return swdesc;

err_pool_free:
	/* Roll back the items allocated so far, in reverse order */
	dev_err(chan2dev(chan), "Failed to alloc descriptors\n");
	while (--i >= 0)
		dma_pool_free(chan->lli_pool, swdesc->lli[i].hwdesc, swdesc->lli[i].hwdesc_addr);
	kfree(swdesc);

	return NULL;
}
448
stm32_dma3_chan_desc_free(struct stm32_dma3_chan * chan,struct stm32_dma3_swdesc * swdesc)449 static void stm32_dma3_chan_desc_free(struct stm32_dma3_chan *chan,
450 struct stm32_dma3_swdesc *swdesc)
451 {
452 int i;
453
454 for (i = 0; i < swdesc->lli_size; i++)
455 dma_pool_free(chan->lli_pool, swdesc->lli[i].hwdesc, swdesc->lli[i].hwdesc_addr);
456
457 kfree(swdesc);
458 }
459
stm32_dma3_chan_vdesc_free(struct virt_dma_desc * vdesc)460 static void stm32_dma3_chan_vdesc_free(struct virt_dma_desc *vdesc)
461 {
462 struct stm32_dma3_swdesc *swdesc = to_stm32_dma3_swdesc(vdesc);
463 struct stm32_dma3_chan *chan = to_stm32_dma3_chan(vdesc->tx.chan);
464
465 stm32_dma3_chan_desc_free(chan, swdesc);
466 }
467
/*
 * Debug helper: read back the channel programming and log any combination
 * that the hardware would flag as a User Setting Error. Logs only, does not
 * fix nor fail.
 */
static void stm32_dma3_check_user_setting(struct stm32_dma3_chan *chan)
{
	struct stm32_dma3_ddata *ddata = to_stm32_dma3_ddata(chan);
	struct device *dev = chan2dev(chan);
	u32 ctr1 = readl_relaxed(ddata->base + STM32_DMA3_CTR1(chan->id));
	u32 cbr1 = readl_relaxed(ddata->base + STM32_DMA3_CBR1(chan->id));
	u32 csar = readl_relaxed(ddata->base + STM32_DMA3_CSAR(chan->id));
	u32 cdar = readl_relaxed(ddata->base + STM32_DMA3_CDAR(chan->id));
	u32 cllr = readl_relaxed(ddata->base + STM32_DMA3_CLLR(chan->id));
	u32 bndt = FIELD_GET(CBR1_BNDT, cbr1);
	/* Data widths are encoded as log2 of the byte count */
	u32 sdw = 1 << FIELD_GET(CTR1_SDW_LOG2, ctr1);
	u32 ddw = 1 << FIELD_GET(CTR1_DDW_LOG2, ctr1);
	u32 sap = FIELD_GET(CTR1_SAP, ctr1);
	u32 dap = FIELD_GET(CTR1_DAP, ctr1);

	/* A zero block size is only valid if a linked item will update BR1 */
	if (!bndt && !FIELD_GET(CLLR_UB1, cllr))
		dev_err(dev, "null source block size and no update of this value\n");
	/* Block size and addresses must be aligned on the data widths */
	if (bndt % sdw)
		dev_err(dev, "source block size not multiple of src data width\n");
	if (FIELD_GET(CTR1_PAM, ctr1) == CTR1_PAM_PACK_UNPACK && bndt % ddw)
		dev_err(dev, "(un)packing mode w/ src block size not multiple of dst data width\n");
	if (csar % sdw)
		dev_err(dev, "unaligned source address not multiple of src data width\n");
	if (cdar % ddw)
		dev_err(dev, "unaligned destination address not multiple of dst data width\n");
	/* 8-byte accesses are only possible through an AXI port, not AHB */
	if (sdw == DMA_SLAVE_BUSWIDTH_8_BYTES && port_is_ahb(ddata->ports_max_dw[sap]))
		dev_err(dev, "double-word source data width not supported on port %u\n", sap);
	if (ddw == DMA_SLAVE_BUSWIDTH_8_BYTES && port_is_ahb(ddata->ports_max_dw[dap]))
		dev_err(dev, "double-word destination data width not supported on port %u\n", dap);
}
498
/*
 * Fill the hardware item @curr of @swdesc for a block of @len bytes from @src
 * to @dst, and chain it to the next item (or back to item 0 when cyclic).
 */
static void stm32_dma3_chan_prep_hwdesc(struct stm32_dma3_chan *chan,
					struct stm32_dma3_swdesc *swdesc,
					u32 curr, dma_addr_t src, dma_addr_t dst, u32 len,
					u32 ctr1, u32 ctr2, bool is_last, bool is_cyclic)
{
	struct stm32_dma3_hwdesc *hwdesc = swdesc->lli[curr].hwdesc;
	dma_addr_t next_lli = 0;
	u32 cllr = 0;

	hwdesc->ctr1 = ctr1;
	hwdesc->ctr2 = ctr2;
	hwdesc->cbr1 = FIELD_PREP(CBR1_BNDT, len);
	hwdesc->csar = src;
	hwdesc->cdar = dst;

	/* Select the next item: successor, loop back to the head, or none */
	if (!is_last)
		next_lli = swdesc->lli[curr + 1].hwdesc_addr;
	else if (is_cyclic)
		next_lli = swdesc->lli[0].hwdesc_addr;

	/* CLLR: next item address plus the "update all registers" bits */
	if (next_lli)
		cllr = CLLR_UT1 | CLLR_UT2 | CLLR_UB1 | CLLR_USA | CLLR_UDA | CLLR_ULL |
		       (next_lli & CLLR_LA);
	hwdesc->cllr = cllr;

	/*
	 * Make sure to flush the CPU's write buffers so that the descriptors are ready to be read
	 * by DMA3. By explicitly using a write memory barrier here, instead of doing it with writel
	 * to enable the channel, we avoid an unnecessary barrier in the case where the descriptors
	 * are reused (DMA_CTRL_REUSE).
	 */
	if (is_last)
		dma_wmb();
}
540
/*
 * Widest data width usable for this port/channel that also divides both the
 * transfer length and the address alignment.
 */
static enum dma_slave_buswidth stm32_dma3_get_max_dw(u32 chan_max_burst,
						     enum stm32_dma3_port_data_width port_max_dw,
						     u32 len, dma_addr_t addr)
{
	enum dma_slave_buswidth candidate = get_chan_max_dw(port_max_dw, chan_max_burst);

	/* Lowest set bit of (len | addr | candidate) is the common alignment */
	return 1 << __ffs(len | addr | candidate);
}
550
/* Burst length (in beats of @dw bytes) usable for a transfer of @len bytes */
static u32 stm32_dma3_get_max_burst(u32 len, enum dma_slave_buswidth dw,
				    u32 chan_max_burst, u32 bus_max_burst)
{
	u32 burst;

	/* len is a multiple of dw, so if len is < chan_max_burst, shorten burst */
	if (!chan_max_burst)
		burst = 1;
	else if (len < chan_max_burst)
		burst = len / dw;
	else
		burst = chan_max_burst / dw;

	/*
	 * HW doesn't modify the burst if burst size <= half of the fifo size.
	 * If len is not a multiple of burst size, last burst is shortened by HW.
	 * Take care of maximum burst supported on interconnect bus.
	 */
	return min_t(u32, burst, bus_max_burst);
}
567
/*
 * Compute the CCR priority and the CTR1/CTR2 register values for a transfer of
 * @len bytes from @src_addr to @dst_addr in direction @dir. Data widths and
 * burst lengths are taken from the dma_slave_config when set, then clamped to
 * the channel/port capabilities so the hardware will not raise a User Setting
 * Error. Returns 0 on success, -EINVAL on an unsupported configuration.
 */
static int stm32_dma3_chan_prep_hw(struct stm32_dma3_chan *chan, enum dma_transfer_direction dir,
				   u32 *ccr, u32 *ctr1, u32 *ctr2,
				   dma_addr_t src_addr, dma_addr_t dst_addr, u32 len)
{
	struct stm32_dma3_ddata *ddata = to_stm32_dma3_ddata(chan);
	/* By-value snapshot, only the capability fields are read below */
	struct dma_device dma_device = ddata->dma_dev;
	u32 src_max_burst = STM32_DMA3_MAX_BURST_LEN, dst_max_burst = STM32_DMA3_MAX_BURST_LEN;
	u32 sdw, ddw, sbl_max, dbl_max, tcem, init_dw, init_bl_max;
	u32 _ctr1 = 0, _ctr2 = 0;
	u32 ch_conf = chan->dt_config.ch_conf;
	u32 tr_conf = chan->dt_config.tr_conf;
	u32 sap = FIELD_GET(STM32_DMA3_DT_SAP, tr_conf), sap_max_dw;
	u32 dap = FIELD_GET(STM32_DMA3_DT_DAP, tr_conf), dap_max_dw;

	dev_dbg(chan2dev(chan), "%s from %pad to %pad\n",
		dmaengine_get_direction_text(dir), &src_addr, &dst_addr);

	/* Defaults when the slave config leaves width/burst unset */
	sdw = chan->dma_config.src_addr_width ? : get_chan_max_dw(sap, chan->max_burst);
	ddw = chan->dma_config.dst_addr_width ? : get_chan_max_dw(dap, chan->max_burst);
	sbl_max = chan->dma_config.src_maxburst ? : 1;
	dbl_max = chan->dma_config.dst_maxburst ? : 1;

	/* Following conditions would raise User Setting Error interrupt */
	if (!(dma_device.src_addr_widths & BIT(sdw)) || !(dma_device.dst_addr_widths & BIT(ddw))) {
		dev_err(chan2dev(chan), "Bus width (src=%u, dst=%u) not supported\n", sdw, ddw);
		return -EINVAL;
	}

	if (ddata->ports_max_dw[1] == DW_INVALID && (sap || dap)) {
		dev_err(chan2dev(chan), "Only one master port, port 1 is not supported\n");
		return -EINVAL;
	}

	sap_max_dw = ddata->ports_max_dw[sap];
	dap_max_dw = ddata->ports_max_dw[dap];
	if ((port_is_ahb(sap_max_dw) && sdw == DMA_SLAVE_BUSWIDTH_8_BYTES) ||
	    (port_is_ahb(dap_max_dw) && ddw == DMA_SLAVE_BUSWIDTH_8_BYTES)) {
		dev_err(chan2dev(chan),
			"8 bytes buswidth (src=%u, dst=%u) not supported on port (sap=%u, dap=%u\n",
			sdw, ddw, sap, dap);
		return -EINVAL;
	}

	/* Source/destination increment, port selection, AXI burst limitation */
	if (FIELD_GET(STM32_DMA3_DT_SINC, tr_conf))
		_ctr1 |= CTR1_SINC;
	if (sap)
		_ctr1 |= CTR1_SAP;
	if (port_is_axi(sap_max_dw)) /* AXI - apply axi maximum burst limitation */
		src_max_burst = ddata->axi_max_burst_len;
	if (FIELD_GET(STM32_DMA3_DT_DINC, tr_conf))
		_ctr1 |= CTR1_DINC;
	if (dap)
		_ctr1 |= CTR1_DAP;
	if (port_is_axi(dap_max_dw)) /* AXI - apply axi maximum burst limitation */
		dst_max_burst = ddata->axi_max_burst_len;

	/* Hardware request line; SWREQ stays cleared for device transfers */
	_ctr2 |= FIELD_PREP(CTR2_REQSEL, chan->dt_config.req_line) & ~CTR2_SWREQ;
	if (FIELD_GET(STM32_DMA3_DT_BREQ, tr_conf))
		_ctr2 |= CTR2_BREQ;
	if (dir == DMA_DEV_TO_MEM && FIELD_GET(STM32_DMA3_DT_PFREQ, tr_conf))
		_ctr2 |= CTR2_PFREQ;
	tcem = FIELD_GET(STM32_DMA3_DT_TCEM, tr_conf);
	_ctr2 |= FIELD_PREP(CTR2_TCEM, tcem);

	/* Store TCEM to know on which event TC flag occurred */
	chan->tcem = tcem;
	/* Store direction for residue computation */
	chan->dma_config.direction = dir;

	switch (dir) {
	case DMA_MEM_TO_DEV:
		/* Set destination (device) data width and burst */
		ddw = min_t(u32, ddw, stm32_dma3_get_max_dw(chan->max_burst, dap_max_dw,
							    len, dst_addr));
		dbl_max = min_t(u32, dbl_max, stm32_dma3_get_max_burst(len, ddw, chan->max_burst,
								       dst_max_burst));

		/* Set source (memory) data width and burst */
		sdw = stm32_dma3_get_max_dw(chan->max_burst, sap_max_dw, len, src_addr);
		sbl_max = stm32_dma3_get_max_burst(len, sdw, chan->max_burst, src_max_burst);
		/* NOPACK forces the memory side to mirror the device side */
		if (!!FIELD_GET(STM32_DMA3_DT_NOPACK, tr_conf)) {
			sdw = ddw;
			sbl_max = dbl_max;
		}

		_ctr1 |= FIELD_PREP(CTR1_SDW_LOG2, ilog2(sdw));
		_ctr1 |= FIELD_PREP(CTR1_SBL_1, sbl_max - 1);
		_ctr1 |= FIELD_PREP(CTR1_DDW_LOG2, ilog2(ddw));
		_ctr1 |= FIELD_PREP(CTR1_DBL_1, dbl_max - 1);

		if (ddw != sdw) {
			_ctr1 |= FIELD_PREP(CTR1_PAM, CTR1_PAM_PACK_UNPACK);
			/* Should never reach this case as ddw is clamped down */
			if (len & (ddw - 1)) {
				dev_err(chan2dev(chan),
					"Packing mode is enabled and len is not multiple of ddw");
				return -EINVAL;
			}
		}

		/* dst = dev */
		_ctr2 |= CTR2_DREQ;

		break;

	case DMA_DEV_TO_MEM:
		/* Set source (device) data width and burst */
		sdw = min_t(u32, sdw, stm32_dma3_get_max_dw(chan->max_burst, sap_max_dw,
							    len, src_addr));
		sbl_max = min_t(u32, sbl_max, stm32_dma3_get_max_burst(len, sdw, chan->max_burst,
								       src_max_burst));

		/* Set destination (memory) data width and burst */
		ddw = stm32_dma3_get_max_dw(chan->max_burst, dap_max_dw, len, dst_addr);
		dbl_max = stm32_dma3_get_max_burst(len, ddw, chan->max_burst, dst_max_burst);
		if (!!FIELD_GET(STM32_DMA3_DT_NOPACK, tr_conf) ||
		    ((_ctr2 & CTR2_PFREQ) && ddw > sdw)) { /* Packing to wider ddw not supported */
			ddw = sdw;
			dbl_max = sbl_max;
		}

		_ctr1 |= FIELD_PREP(CTR1_SDW_LOG2, ilog2(sdw));
		_ctr1 |= FIELD_PREP(CTR1_SBL_1, sbl_max - 1);
		_ctr1 |= FIELD_PREP(CTR1_DDW_LOG2, ilog2(ddw));
		_ctr1 |= FIELD_PREP(CTR1_DBL_1, dbl_max - 1);

		if (ddw != sdw) {
			_ctr1 |= FIELD_PREP(CTR1_PAM, CTR1_PAM_PACK_UNPACK);
			/* Should never reach this case as ddw is clamped down */
			if (len & (ddw - 1)) {
				dev_err(chan2dev(chan),
					"Packing mode is enabled and len is not multiple of ddw\n");
				return -EINVAL;
			}
		}

		/* dst = mem */
		_ctr2 &= ~CTR2_DREQ;

		break;

	case DMA_MEM_TO_MEM:
		/* Set source (memory) data width and burst */
		init_dw = sdw;
		init_bl_max = sbl_max;
		sdw = stm32_dma3_get_max_dw(chan->max_burst, sap_max_dw, len, src_addr);
		sbl_max = stm32_dma3_get_max_burst(len, sdw, chan->max_burst, src_max_burst);
		/* Honor an explicit slave config as an upper bound */
		if (chan->config_set & STM32_DMA3_CFG_SET_DMA) {
			sdw = min_t(u32, init_dw, sdw);
			sbl_max = min_t(u32, init_bl_max, stm32_dma3_get_max_burst(len, sdw,
										   chan->max_burst,
										   src_max_burst));
		}

		/* Set destination (memory) data width and burst */
		init_dw = ddw;
		init_bl_max = dbl_max;
		ddw = stm32_dma3_get_max_dw(chan->max_burst, dap_max_dw, len, dst_addr);
		dbl_max = stm32_dma3_get_max_burst(len, ddw, chan->max_burst, dst_max_burst);
		if (chan->config_set & STM32_DMA3_CFG_SET_DMA) {
			ddw = min_t(u32, init_dw, ddw);
			dbl_max = min_t(u32, init_bl_max, stm32_dma3_get_max_burst(len, ddw,
										   chan->max_burst,
										   dst_max_burst));
		}

		_ctr1 |= FIELD_PREP(CTR1_SDW_LOG2, ilog2(sdw));
		_ctr1 |= FIELD_PREP(CTR1_SBL_1, sbl_max - 1);
		_ctr1 |= FIELD_PREP(CTR1_DDW_LOG2, ilog2(ddw));
		_ctr1 |= FIELD_PREP(CTR1_DBL_1, dbl_max - 1);

		if (ddw != sdw) {
			_ctr1 |= FIELD_PREP(CTR1_PAM, CTR1_PAM_PACK_UNPACK);
			/* Should never reach this case as ddw is clamped down */
			if (len & (ddw - 1)) {
				dev_err(chan2dev(chan),
					"Packing mode is enabled and len is not multiple of ddw");
				return -EINVAL;
			}
		}

		/* CTR2_REQSEL/DREQ/BREQ/PFREQ are ignored with CTR2_SWREQ=1 */
		_ctr2 |= CTR2_SWREQ;

		break;

	default:
		dev_err(chan2dev(chan), "Direction %s not supported\n",
			dmaengine_get_direction_text(dir));
		return -EINVAL;
	}

	*ccr |= FIELD_PREP(CCR_PRIO, FIELD_GET(STM32_DMA3_DT_PRIO, ch_conf));
	*ctr1 = _ctr1;
	*ctr2 = _ctr2;

	dev_dbg(chan2dev(chan), "%s: sdw=%u bytes sbl=%u beats ddw=%u bytes dbl=%u beats\n",
		__func__, sdw, sbl_max, ddw, dbl_max);

	return 0;
}
769
/*
 * Pop the next pending virtual descriptor, program the channel registers from
 * its first hardware item and enable the channel. Clears chan->swdesc and
 * returns silently when nothing is pending.
 */
static void stm32_dma3_chan_start(struct stm32_dma3_chan *chan)
{
	struct stm32_dma3_ddata *ddata = to_stm32_dma3_ddata(chan);
	struct virt_dma_desc *vdesc;
	struct stm32_dma3_hwdesc *hwdesc;
	u32 id = chan->id;
	u32 csr, ccr;

	vdesc = vchan_next_desc(&chan->vchan);
	if (!vdesc) {
		chan->swdesc = NULL;
		return;
	}
	/* Remove the descriptor from the pending list: it is now in flight */
	list_del(&vdesc->node);

	chan->swdesc = to_stm32_dma3_swdesc(vdesc);
	hwdesc = chan->swdesc->lli[0].hwdesc;

	stm32_dma3_chan_dump_hwdesc(chan, chan->swdesc);

	/* Program the first item; the following ones are fetched through CLLR */
	writel_relaxed(chan->swdesc->ccr, ddata->base + STM32_DMA3_CCR(id));
	writel_relaxed(hwdesc->ctr1, ddata->base + STM32_DMA3_CTR1(id));
	writel_relaxed(hwdesc->ctr2, ddata->base + STM32_DMA3_CTR2(id));
	writel_relaxed(hwdesc->cbr1, ddata->base + STM32_DMA3_CBR1(id));
	writel_relaxed(hwdesc->csar, ddata->base + STM32_DMA3_CSAR(id));
	writel_relaxed(hwdesc->cdar, ddata->base + STM32_DMA3_CDAR(id));
	writel_relaxed(hwdesc->cllr, ddata->base + STM32_DMA3_CLLR(id));

	/* Clear any pending interrupts */
	csr = readl_relaxed(ddata->base + STM32_DMA3_CSR(id));
	if (csr & CSR_ALL_F)
		writel_relaxed(csr, ddata->base + STM32_DMA3_CFCR(id));

	stm32_dma3_chan_dump_reg(chan);

	/* Enable the channel, keeping the other CCR bits as just programmed */
	ccr = readl_relaxed(ddata->base + STM32_DMA3_CCR(id));
	writel_relaxed(ccr | CCR_EN, ddata->base + STM32_DMA3_CCR(id));

	chan->dma_status = DMA_IN_PROGRESS;

	dev_dbg(chan2dev(chan), "vchan %p: started\n", &chan->vchan);
}
812
/*
 * Suspend (@susp true) or resume (@susp false) the channel. On suspension,
 * wait for the hardware to confirm with the SUSPF flag and clear it; returns
 * the polling error code, 0 otherwise.
 */
static int stm32_dma3_chan_suspend(struct stm32_dma3_chan *chan, bool susp)
{
	struct stm32_dma3_ddata *ddata = to_stm32_dma3_ddata(chan);
	u32 csr, ccr;
	int ret;

	ccr = readl_relaxed(ddata->base + STM32_DMA3_CCR(chan->id)) & ~CCR_EN;
	if (susp)
		ccr |= CCR_SUSP;
	else
		ccr &= ~CCR_SUSP;

	writel_relaxed(ccr, ddata->base + STM32_DMA3_CCR(chan->id));

	if (!susp)
		return 0;

	ret = readl_relaxed_poll_timeout_atomic(ddata->base + STM32_DMA3_CSR(chan->id), csr,
						csr & CSR_SUSPF, 1, 10);
	if (!ret)
		writel_relaxed(CFCR_SUSPF, ddata->base + STM32_DMA3_CFCR(chan->id));

	stm32_dma3_chan_dump_reg(chan);

	return ret;
}
837
stm32_dma3_chan_reset(struct stm32_dma3_chan * chan)838 static void stm32_dma3_chan_reset(struct stm32_dma3_chan *chan)
839 {
840 struct stm32_dma3_ddata *ddata = to_stm32_dma3_ddata(chan);
841 u32 ccr = readl_relaxed(ddata->base + STM32_DMA3_CCR(chan->id)) & ~CCR_EN;
842
843 writel_relaxed(ccr |= CCR_RESET, ddata->base + STM32_DMA3_CCR(chan->id));
844 }
845
/*
 * Identify which item of @swdesc is currently being transferred, from the CLLR
 * snapshot, accumulating in @residue the bytes of the items not yet started.
 * Returns the item index, or -EINVAL if @cllr matches no item.
 */
static int stm32_dma3_chan_get_curr_hwdesc(struct stm32_dma3_swdesc *swdesc, u32 cllr, u32 *residue)
{
	u32 next_lli_offset = cllr & CLLR_LA;
	u32 n;

	/* If cllr is null, it means it is either the last or single item */
	if (!cllr)
		return swdesc->lli_size - 1;

	/* In cyclic mode, go fast and first check we are not on the last item */
	if (swdesc->cyclic && next_lli_offset == (swdesc->lli[0].hwdesc_addr & CLLR_LA))
		return swdesc->lli_size - 1;

	/* As transfer is in progress, look backward from the last item */
	for (n = swdesc->lli_size - 1; n > 0; n--) {
		*residue += FIELD_GET(CBR1_BNDT, swdesc->lli[n].hwdesc->cbr1);
		if ((swdesc->lli[n].hwdesc_addr & CLLR_LA) == next_lli_offset)
			return n - 1;
	}

	return -EINVAL;
}
868
/*
 * stm32_dma3_chan_set_residue - Fill in residue and in-flight bytes for a transfer
 * @chan: channel the descriptor belongs to
 * @swdesc: software descriptor the cookie refers to
 * @txstate: dma_tx_state to fill in for the dmaengine core
 *
 * Briefly suspends the channel (if active and not already paused) to take a
 * coherent snapshot of CLLR/CBR1/CDAR, locates the current linked-list item,
 * cumulates the byte counts still to be transferred, and accounts for bytes
 * held in the channel FIFO where possible.
 */
static void stm32_dma3_chan_set_residue(struct stm32_dma3_chan *chan,
					struct stm32_dma3_swdesc *swdesc,
					struct dma_tx_state *txstate)
{
	struct stm32_dma3_ddata *ddata = to_stm32_dma3_ddata(chan);
	struct device *dev = chan2dev(chan);
	struct stm32_dma3_hwdesc *hwdesc;
	u32 residue, curr_lli, csr, cdar, cbr1, cllr, bndt, fifol;
	bool pack_unpack;
	int ret;

	csr = readl_relaxed(ddata->base + STM32_DMA3_CSR(chan->id));
	if (!(csr & CSR_IDLEF) && chan->dma_status != DMA_PAUSED) {
		/* Suspend current transfer to read registers for a snapshot */
		writel_relaxed(swdesc->ccr | CCR_SUSP, ddata->base + STM32_DMA3_CCR(chan->id));
		ret = readl_relaxed_poll_timeout_atomic(ddata->base + STM32_DMA3_CSR(chan->id), csr,
							csr & (CSR_SUSPF | CSR_IDLEF), 1, 10);

		/* On timeout or normal completion during the poll, undo the suspend request */
		if (ret || ((csr & CSR_TCF) && (csr & CSR_IDLEF))) {
			writel_relaxed(CFCR_SUSPF, ddata->base + STM32_DMA3_CFCR(chan->id));
			writel_relaxed(swdesc->ccr, ddata->base + STM32_DMA3_CCR(chan->id));
			if (ret)
				dev_err(dev, "Channel suspension timeout, csr=%08x\n", csr);
		}
	}

	/* If channel is still active (CSR_IDLEF is not set), can't get a reliable residue */
	if (!(csr & CSR_IDLEF))
		dev_warn(dev, "Can't get residue: channel still active, csr=%08x\n", csr);

	/*
	 * If channel is not suspended, but Idle and Transfer Complete are set,
	 * linked-list is over, no residue
	 */
	if (!(csr & CSR_SUSPF) && (csr & CSR_TCF) && (csr & CSR_IDLEF))
		return;

	/* Read registers to have a snapshot */
	cllr = readl_relaxed(ddata->base + STM32_DMA3_CLLR(chan->id));
	cbr1 = readl_relaxed(ddata->base + STM32_DMA3_CBR1(chan->id));
	cdar = readl_relaxed(ddata->base + STM32_DMA3_CDAR(chan->id));

	/* Resume current transfer */
	if (csr & CSR_SUSPF) {
		writel_relaxed(CFCR_SUSPF, ddata->base + STM32_DMA3_CFCR(chan->id));
		writel_relaxed(swdesc->ccr, ddata->base + STM32_DMA3_CCR(chan->id));
	}

	/* Add current BNDT */
	bndt = FIELD_GET(CBR1_BNDT, cbr1);
	residue = bndt;

	/* Get current hwdesc and cumulate residue of pending hwdesc BNDT */
	ret = stm32_dma3_chan_get_curr_hwdesc(swdesc, cllr, &residue);
	if (ret < 0) {
		dev_err(chan2dev(chan), "Can't get residue: current hwdesc not found\n");
		return;
	}
	curr_lli = ret;

	/* Read current FIFO level - in units of programmed destination data width */
	hwdesc = swdesc->lli[curr_lli].hwdesc;
	fifol = FIELD_GET(CSR_FIFOL, csr) * (1 << FIELD_GET(CTR1_DDW_LOG2, hwdesc->ctr1));
	/* If the FIFO contains as many bytes as its size, it can't contain more */
	if (fifol == (1 << (chan->fifo_size + 1)))
		goto skip_fifol_update;

	/*
	 * In case of PACKING (Destination burst length > Source burst length) or UNPACKING
	 * (Source burst length > Destination burst length), bytes could be pending in the FIFO
	 * (to be packed up to Destination burst length or unpacked into Destination burst length
	 * chunks).
	 * BNDT is not reliable, as it reflects the number of bytes read from the source but not the
	 * number of bytes written to the destination.
	 * FIFOL is also not sufficient, because it reflects the number of available write beats in
	 * units of Destination data width but not the bytes not yet packed or unpacked.
	 * In case of Destination increment DINC, it is possible to compute the number of bytes in
	 * the FIFO:
	 * fifol_in_bytes = bytes_read - bytes_written.
	 */
	pack_unpack = !!(FIELD_GET(CTR1_PAM, hwdesc->ctr1) == CTR1_PAM_PACK_UNPACK);
	if (pack_unpack && (hwdesc->ctr1 & CTR1_DINC)) {
		int bytes_read = FIELD_GET(CBR1_BNDT, hwdesc->cbr1) - bndt;
		int bytes_written = cdar - hwdesc->cdar;

		if (bytes_read > 0)
			fifol = bytes_read - bytes_written;
	}

skip_fifol_update:
	if (fifol) {
		dev_dbg(chan2dev(chan), "%u byte(s) in the FIFO\n", fifol);
		dma_set_in_flight_bytes(txstate, fifol);
		/*
		 * Residue is already accurate for DMA_MEM_TO_DEV as BNDT reflects data read from
		 * the source memory buffer, so just need to add fifol to residue in case of
		 * DMA_DEV_TO_MEM transfer because these bytes are not yet written in destination
		 * memory buffer.
		 */
		if (chan->dma_config.direction == DMA_DEV_TO_MEM)
			residue += fifol;
	}
	dma_set_residue(txstate, residue);
}
973
/*
 * stm32_dma3_chan_stop - Disable interrupts, suspend if running, then reset
 *
 * Returns the suspension status: non-zero when the channel failed to suspend
 * in time, in which case data may have been lost.
 */
static int stm32_dma3_chan_stop(struct stm32_dma3_chan *chan)
{
	struct stm32_dma3_ddata *ddata = to_stm32_dma3_ddata(chan);
	u32 ccr;
	int ret = 0;

	chan->dma_status = DMA_COMPLETE;

	/* Disable interrupts */
	ccr = readl_relaxed(ddata->base + STM32_DMA3_CCR(chan->id));
	writel_relaxed(ccr & ~(CCR_ALLIE | CCR_EN), ddata->base + STM32_DMA3_CCR(chan->id));

	/* Only suspend a channel that is enabled and not already suspended */
	if (!(ccr & CCR_SUSP) && (ccr & CCR_EN)) {
		/* Suspend the channel */
		ret = stm32_dma3_chan_suspend(chan, true);
		if (ret)
			dev_warn(chan2dev(chan), "%s: timeout, data might be lost\n", __func__);
	}

	/*
	 * Reset the channel: this causes the reset of the FIFO and the reset of the channel
	 * internal state, the reset of CCR_EN and CCR_SUSP bits.
	 */
	stm32_dma3_chan_reset(chan);

	return ret;
}
1001
stm32_dma3_chan_complete(struct stm32_dma3_chan * chan)1002 static void stm32_dma3_chan_complete(struct stm32_dma3_chan *chan)
1003 {
1004 if (!chan->swdesc)
1005 return;
1006
1007 vchan_cookie_complete(&chan->swdesc->vdesc);
1008 chan->swdesc = NULL;
1009 stm32_dma3_chan_start(chan);
1010 }
1011
/*
 * stm32_dma3_chan_irq - Per-channel interrupt handler
 *
 * Handles transfer complete (descriptor completion or cyclic period callback)
 * and the three error conditions (user setting, update link, data transfer
 * errors), then clears all handled flags through CFCR.
 */
static irqreturn_t stm32_dma3_chan_irq(int irq, void *devid)
{
	struct stm32_dma3_chan *chan = devid;
	struct stm32_dma3_ddata *ddata = to_stm32_dma3_ddata(chan);
	u32 misr, csr, ccr;

	spin_lock(&chan->vchan.lock);

	/* Bail out early if this channel has no pending masked interrupt */
	misr = readl_relaxed(ddata->base + STM32_DMA3_MISR);
	if (!(misr & MISR_MIS(chan->id))) {
		spin_unlock(&chan->vchan.lock);
		return IRQ_NONE;
	}

	csr = readl_relaxed(ddata->base + STM32_DMA3_CSR(chan->id));
	/* Only consider status flags whose interrupt is actually enabled */
	ccr = readl_relaxed(ddata->base + STM32_DMA3_CCR(chan->id)) & CCR_ALLIE;

	if (csr & CSR_TCF && ccr & CCR_TCIE) {
		if (chan->swdesc->cyclic)
			vchan_cyclic_callback(&chan->swdesc->vdesc);
		else
			stm32_dma3_chan_complete(chan);
	}

	if (csr & CSR_USEF && ccr & CCR_USEIE) {
		dev_err(chan2dev(chan), "User setting error\n");
		chan->dma_status = DMA_ERROR;
		/* CCR.EN automatically cleared by HW */
		stm32_dma3_check_user_setting(chan);
		stm32_dma3_chan_reset(chan);
	}

	if (csr & CSR_ULEF && ccr & CCR_ULEIE) {
		dev_err(chan2dev(chan), "Update link transfer error\n");
		chan->dma_status = DMA_ERROR;
		/* CCR.EN automatically cleared by HW */
		stm32_dma3_chan_reset(chan);
	}

	if (csr & CSR_DTEF && ccr & CCR_DTEIE) {
		dev_err(chan2dev(chan), "Data transfer error\n");
		chan->dma_status = DMA_ERROR;
		/* CCR.EN automatically cleared by HW */
		stm32_dma3_chan_reset(chan);
	}

	/*
	 * Half Transfer Interrupt may be disabled but Half Transfer Flag can be set,
	 * ensure HTF flag to be cleared, with other flags.
	 */
	csr &= (ccr | CCR_HTIE);

	if (csr)
		writel_relaxed(csr, ddata->base + STM32_DMA3_CFCR(chan->id));

	spin_unlock(&chan->vchan.lock);

	return IRQ_HANDLED;
}
1071
stm32_dma3_get_chan_sem(struct stm32_dma3_chan * chan)1072 static int stm32_dma3_get_chan_sem(struct stm32_dma3_chan *chan)
1073 {
1074 struct stm32_dma3_ddata *ddata = to_stm32_dma3_ddata(chan);
1075 u32 csemcr, ccid;
1076
1077 csemcr = readl_relaxed(ddata->base + STM32_DMA3_CSEMCR(chan->id));
1078 /* Make an attempt to take the channel semaphore if not already taken */
1079 if (!(csemcr & CSEMCR_SEM_MUTEX)) {
1080 writel_relaxed(CSEMCR_SEM_MUTEX, ddata->base + STM32_DMA3_CSEMCR(chan->id));
1081 csemcr = readl_relaxed(ddata->base + STM32_DMA3_CSEMCR(chan->id));
1082 }
1083
1084 /* Check if channel is under CID1 control */
1085 ccid = FIELD_GET(CSEMCR_SEM_CCID, csemcr);
1086 if (!(csemcr & CSEMCR_SEM_MUTEX) || ccid != CCIDCFGR_CID1)
1087 goto bad_cid;
1088
1089 chan->semaphore_taken = true;
1090 dev_dbg(chan2dev(chan), "under CID1 control (semcr=0x%08x)\n", csemcr);
1091
1092 return 0;
1093
1094 bad_cid:
1095 chan->semaphore_taken = false;
1096 dev_err(chan2dev(chan), "not under CID1 control (in-use by CID%d)\n", ccid);
1097
1098 return -EACCES;
1099 }
1100
stm32_dma3_put_chan_sem(struct stm32_dma3_chan * chan)1101 static void stm32_dma3_put_chan_sem(struct stm32_dma3_chan *chan)
1102 {
1103 struct stm32_dma3_ddata *ddata = to_stm32_dma3_ddata(chan);
1104
1105 if (chan->semaphore_taken) {
1106 writel_relaxed(0, ddata->base + STM32_DMA3_CSEMCR(chan->id));
1107 chan->semaphore_taken = false;
1108 dev_dbg(chan2dev(chan), "no more under CID1 control\n");
1109 }
1110 }
1111
/*
 * stm32_dma3_alloc_chan_resources - Get the channel ready for use
 *
 * Resumes the controller (runtime PM), verifies the channel is free, creates
 * the per-channel LLI dma pool and, in semaphore mode, takes the channel
 * semaphore. Everything is undone in reverse order on failure.
 */
static int stm32_dma3_alloc_chan_resources(struct dma_chan *c)
{
	struct stm32_dma3_chan *chan = to_stm32_dma3_chan(c);
	struct stm32_dma3_ddata *ddata = to_stm32_dma3_ddata(chan);
	int ret;

	ret = pm_runtime_resume_and_get(ddata2dev(ddata));
	if (ret < 0)
		return ret;

	/* Ensure the channel is free */
	if (chan->semaphore_mode &&
	    readl_relaxed(ddata->base + STM32_DMA3_CSEMCR(chan->id)) & CSEMCR_SEM_MUTEX) {
		ret = -EBUSY;
		goto err_put_sync;
	}

	/* SZ_64K boundary: linked-list items must not cross a 64K aligned region */
	chan->lli_pool = dmam_pool_create(dev_name(&c->dev->device), c->device->dev,
					  sizeof(struct stm32_dma3_hwdesc),
					  __alignof__(struct stm32_dma3_hwdesc), SZ_64K);
	if (!chan->lli_pool) {
		dev_err(chan2dev(chan), "Failed to create LLI pool\n");
		ret = -ENOMEM;
		goto err_put_sync;
	}

	/* Take the channel semaphore */
	if (chan->semaphore_mode) {
		ret = stm32_dma3_get_chan_sem(chan);
		if (ret)
			goto err_pool_destroy;
	}

	return 0;

err_pool_destroy:
	dmam_pool_destroy(chan->lli_pool);
	chan->lli_pool = NULL;

err_put_sync:
	pm_runtime_put_sync(ddata2dev(ddata));

	return ret;
}
1156
/*
 * stm32_dma3_free_chan_resources - Release everything taken at allocation time
 *
 * Stops and resets the channel, frees the virtual-channel descriptors and the
 * LLI pool, releases the channel semaphore (semaphore mode) and drops the
 * runtime PM reference. The per-channel configuration is cleared so the next
 * user starts from a clean state.
 */
static void stm32_dma3_free_chan_resources(struct dma_chan *c)
{
	struct stm32_dma3_chan *chan = to_stm32_dma3_chan(c);
	struct stm32_dma3_ddata *ddata = to_stm32_dma3_ddata(chan);
	unsigned long flags;

	/* Ensure channel is in idle state */
	spin_lock_irqsave(&chan->vchan.lock, flags);
	stm32_dma3_chan_stop(chan);
	chan->swdesc = NULL;
	spin_unlock_irqrestore(&chan->vchan.lock, flags);

	vchan_free_chan_resources(to_virt_chan(c));

	dmam_pool_destroy(chan->lli_pool);
	chan->lli_pool = NULL;

	/* Release the channel semaphore */
	if (chan->semaphore_mode)
		stm32_dma3_put_chan_sem(chan);

	pm_runtime_put_sync(ddata2dev(ddata));

	/* Reset configuration */
	memset(&chan->dt_config, 0, sizeof(chan->dt_config));
	memset(&chan->dma_config, 0, sizeof(chan->dma_config));
	chan->config_set = 0;
}
1185
stm32_dma3_get_ll_count(struct stm32_dma3_chan * chan,size_t len,bool prevent_refactor)1186 static u32 stm32_dma3_get_ll_count(struct stm32_dma3_chan *chan, size_t len, bool prevent_refactor)
1187 {
1188 u32 count;
1189
1190 if (prevent_refactor)
1191 return DIV_ROUND_UP(len, STM32_DMA3_MAX_BLOCK_SIZE);
1192
1193 count = len / STM32_DMA3_MAX_BLOCK_SIZE;
1194 len -= (len / STM32_DMA3_MAX_BLOCK_SIZE) * STM32_DMA3_MAX_BLOCK_SIZE;
1195
1196 if (len >= chan->max_burst) {
1197 count += 1; /* len < STM32_DMA3_MAX_BLOCK_SIZE here, so it fits in one item */
1198 len -= (len / chan->max_burst) * chan->max_burst;
1199 }
1200
1201 /* Unaligned remainder fits in one extra item */
1202 if (len > 0)
1203 count += 1;
1204
1205 return count;
1206 }
1207
/*
 * Provide sensible channel defaults for a memcpy when neither the device tree
 * nor dmaengine_slave_config() pre-configured the channel.
 */
static void stm32_dma3_init_chan_config_for_memcpy(struct stm32_dma3_chan *chan,
						   dma_addr_t dst, dma_addr_t src)
{
	struct stm32_dma3_ddata *ddata = to_stm32_dma3_ddata(chan);
	/* Port 0 by default */
	u32 data_width = get_chan_max_dw(ddata->ports_max_dw[0], chan->max_burst);
	u32 burst_len = chan->max_burst / data_width;

	/* Initialize dt_config if channel not pre-configured through DT */
	if (!(chan->config_set & STM32_DMA3_CFG_SET_DT)) {
		chan->dt_config.ch_conf = FIELD_PREP(STM32_DMA3_DT_PRIO, CCR_PRIO_VERY_HIGH) |
					  FIELD_PREP(STM32_DMA3_DT_FIFO, chan->fifo_size);
		chan->dt_config.tr_conf = STM32_DMA3_DT_SINC | STM32_DMA3_DT_DINC |
					  FIELD_PREP(STM32_DMA3_DT_TCEM, CTR2_TCEM_CHANNEL);
	}

	/* Initialize dma_config if dmaengine_slave_config() not used */
	if (!(chan->config_set & STM32_DMA3_CFG_SET_DMA)) {
		chan->dma_config.src_addr = src;
		chan->dma_config.dst_addr = dst;
		chan->dma_config.src_addr_width = data_width;
		chan->dma_config.dst_addr_width = data_width;
		chan->dma_config.src_maxburst = burst_len;
		chan->dma_config.dst_maxburst = burst_len;
	}
}
1233
/*
 * stm32_dma3_prep_dma_memcpy - Build a linked-list descriptor for a mem-to-mem copy
 *
 * The copy is split into linked-list items of at most STM32_DMA3_MAX_BLOCK_SIZE
 * bytes. Unless NOPACK/NOREFACT is requested through the DT transfer config,
 * an intermediate tail chunk is shortened to a multiple of the channel max
 * burst so that items stay burst-aligned.
 */
static struct dma_async_tx_descriptor *stm32_dma3_prep_dma_memcpy(struct dma_chan *c,
								  dma_addr_t dst, dma_addr_t src,
								  size_t len, unsigned long flags)
{
	struct stm32_dma3_chan *chan = to_stm32_dma3_chan(c);
	struct stm32_dma3_swdesc *swdesc;
	size_t next_size, offset;
	u32 count, i, ctr1, ctr2;
	bool prevent_refactor = !!FIELD_GET(STM32_DMA3_DT_NOPACK, chan->dt_config.tr_conf) ||
				!!FIELD_GET(STM32_DMA3_DT_NOREFACT, chan->dt_config.tr_conf);

	/* Semaphore could be lost during suspend/resume */
	if (chan->semaphore_mode && !chan->semaphore_taken)
		return NULL;

	count = stm32_dma3_get_ll_count(chan, len, prevent_refactor);

	swdesc = stm32_dma3_chan_desc_alloc(chan, count);
	if (!swdesc)
		return NULL;

	if (chan->config_set != STM32_DMA3_CFG_SET_BOTH)
		stm32_dma3_init_chan_config_for_memcpy(chan, dst, src);

	for (i = 0, offset = 0; offset < len; i++, offset += next_size) {
		size_t remaining;
		int ret;

		remaining = len - offset;
		next_size = min_t(size_t, remaining, STM32_DMA3_MAX_BLOCK_SIZE);

		/* Shorten a partial chunk to a burst multiple unless refactoring is prevented */
		if (!prevent_refactor &&
		    (next_size < STM32_DMA3_MAX_BLOCK_SIZE && next_size >= chan->max_burst))
			next_size = chan->max_burst * (remaining / chan->max_burst);

		ret = stm32_dma3_chan_prep_hw(chan, DMA_MEM_TO_MEM, &swdesc->ccr, &ctr1, &ctr2,
					      src + offset, dst + offset, next_size);
		if (ret)
			goto err_desc_free;

		stm32_dma3_chan_prep_hwdesc(chan, swdesc, i, src + offset, dst + offset, next_size,
					    ctr1, ctr2, next_size == remaining, false);
	}

	/* Enable Errors interrupts */
	swdesc->ccr |= CCR_USEIE | CCR_ULEIE | CCR_DTEIE;
	/* Enable Transfer state interrupts */
	swdesc->ccr |= CCR_TCIE;

	swdesc->cyclic = false;

	return vchan_tx_prep(&chan->vchan, &swdesc->vdesc, flags);

err_desc_free:
	stm32_dma3_chan_desc_free(chan, swdesc);

	return NULL;
}
1292
/*
 * stm32_dma3_prep_slave_sg - Build a linked-list descriptor from a scatterlist
 *
 * Each scatterlist entry may be split into several linked-list items: entries
 * larger than STM32_DMA3_MAX_BLOCK_SIZE are chunked, and unless NOPACK or
 * NOREFACT is set, partial chunks are shortened to a burst multiple. The
 * device address only advances when the corresponding INC bit is set in CTR1.
 */
static struct dma_async_tx_descriptor *stm32_dma3_prep_slave_sg(struct dma_chan *c,
								struct scatterlist *sgl,
								unsigned int sg_len,
								enum dma_transfer_direction dir,
								unsigned long flags, void *context)
{
	struct stm32_dma3_chan *chan = to_stm32_dma3_chan(c);
	struct stm32_dma3_swdesc *swdesc;
	struct scatterlist *sg;
	size_t len;
	dma_addr_t sg_addr, dev_addr, src, dst;
	u32 i, j, count, ctr1, ctr2;
	bool prevent_refactor = !!FIELD_GET(STM32_DMA3_DT_NOPACK, chan->dt_config.tr_conf) ||
				!!FIELD_GET(STM32_DMA3_DT_NOREFACT, chan->dt_config.tr_conf);
	int ret;

	/* Semaphore could be lost during suspend/resume */
	if (chan->semaphore_mode && !chan->semaphore_taken)
		return NULL;

	/* First pass: count the linked-list items needed for the whole scatterlist */
	count = 0;
	for_each_sg(sgl, sg, sg_len, i)
		count += stm32_dma3_get_ll_count(chan, sg_dma_len(sg), prevent_refactor);

	swdesc = stm32_dma3_chan_desc_alloc(chan, count);
	if (!swdesc)
		return NULL;

	/* sg_len and i correspond to the initial sgl; count and j correspond to the hwdesc LL */
	j = 0;
	for_each_sg(sgl, sg, sg_len, i) {
		sg_addr = sg_dma_address(sg);
		dev_addr = (dir == DMA_MEM_TO_DEV) ? chan->dma_config.dst_addr :
						     chan->dma_config.src_addr;
		len = sg_dma_len(sg);

		do {
			size_t chunk = min_t(size_t, len, STM32_DMA3_MAX_BLOCK_SIZE);

			if (!prevent_refactor &&
			    (chunk < STM32_DMA3_MAX_BLOCK_SIZE && chunk >= chan->max_burst))
				chunk = chan->max_burst * (len / chan->max_burst);

			if (dir == DMA_MEM_TO_DEV) {
				src = sg_addr;
				dst = dev_addr;

				ret = stm32_dma3_chan_prep_hw(chan, dir, &swdesc->ccr, &ctr1, &ctr2,
							      src, dst, chunk);

				if (FIELD_GET(CTR1_DINC, ctr1))
					dev_addr += chunk;
			} else { /* (dir == DMA_DEV_TO_MEM || dir == DMA_MEM_TO_MEM) */
				src = dev_addr;
				dst = sg_addr;

				ret = stm32_dma3_chan_prep_hw(chan, dir, &swdesc->ccr, &ctr1, &ctr2,
							      src, dst, chunk);

				if (FIELD_GET(CTR1_SINC, ctr1))
					dev_addr += chunk;
			}

			if (ret)
				goto err_desc_free;

			stm32_dma3_chan_prep_hwdesc(chan, swdesc, j, src, dst, chunk,
						    ctr1, ctr2, j == (count - 1), false);

			sg_addr += chunk;
			len -= chunk;
			j++;
		} while (len);
	}

	if (count != sg_len && chan->tcem != CTR2_TCEM_CHANNEL)
		dev_warn(chan2dev(chan), "Linked-list refactored, %d items instead of %d\n",
			 count, sg_len);

	/* Enable Error interrupts */
	swdesc->ccr |= CCR_USEIE | CCR_ULEIE | CCR_DTEIE;
	/* Enable Transfer state interrupts */
	swdesc->ccr |= CCR_TCIE;

	swdesc->cyclic = false;

	return vchan_tx_prep(&chan->vchan, &swdesc->vdesc, flags);

err_desc_free:
	stm32_dma3_chan_desc_free(chan, swdesc);

	return NULL;
}
1386
/*
 * Build a cyclic linked-list descriptor: one item per period, the last item
 * looping back to the first. Each period must fit in a single block.
 */
static struct dma_async_tx_descriptor *stm32_dma3_prep_dma_cyclic(struct dma_chan *c,
								  dma_addr_t buf_addr,
								  size_t buf_len, size_t period_len,
								  enum dma_transfer_direction dir,
								  unsigned long flags)
{
	struct stm32_dma3_chan *chan = to_stm32_dma3_chan(c);
	struct stm32_dma3_swdesc *swdesc;
	dma_addr_t src, dst;
	u32 nb_periods, i, ctr1, ctr2;
	int ret;

	/* Semaphore could be lost during suspend/resume */
	if (chan->semaphore_mode && !chan->semaphore_taken)
		return NULL;

	if (!buf_len || !period_len || period_len > STM32_DMA3_MAX_BLOCK_SIZE) {
		dev_err(chan2dev(chan), "Invalid buffer/period length\n");
		return NULL;
	}

	if (buf_len % period_len) {
		dev_err(chan2dev(chan), "Buffer length not multiple of period length\n");
		return NULL;
	}

	nb_periods = buf_len / period_len;
	swdesc = stm32_dma3_chan_desc_alloc(chan, nb_periods);
	if (!swdesc)
		return NULL;

	/* Compute the hardware configuration once, from the first period */
	switch (dir) {
	case DMA_MEM_TO_DEV:
		src = buf_addr;
		dst = chan->dma_config.dst_addr;

		ret = stm32_dma3_chan_prep_hw(chan, DMA_MEM_TO_DEV, &swdesc->ccr, &ctr1, &ctr2,
					      src, dst, period_len);
		break;
	case DMA_DEV_TO_MEM:
		src = chan->dma_config.src_addr;
		dst = buf_addr;

		ret = stm32_dma3_chan_prep_hw(chan, DMA_DEV_TO_MEM, &swdesc->ccr, &ctr1, &ctr2,
					      src, dst, period_len);
		break;
	default:
		dev_err(chan2dev(chan), "Invalid direction\n");
		ret = -EINVAL;
		break;
	}

	if (ret)
		goto err_desc_free;

	for (i = 0; i < nb_periods; i++) {
		if (dir == DMA_MEM_TO_DEV) {
			src = buf_addr + i * period_len;
			dst = chan->dma_config.dst_addr;
		} else { /* (dir == DMA_DEV_TO_MEM) */
			src = chan->dma_config.src_addr;
			dst = buf_addr + i * period_len;
		}

		stm32_dma3_chan_prep_hwdesc(chan, swdesc, i, src, dst, period_len,
					    ctr1, ctr2, i == (nb_periods - 1), true);
	}

	/* Enable Error interrupts */
	swdesc->ccr |= CCR_USEIE | CCR_ULEIE | CCR_DTEIE;
	/* Enable Transfer state interrupts */
	swdesc->ccr |= CCR_TCIE;

	swdesc->cyclic = true;

	return vchan_tx_prep(&chan->vchan, &swdesc->vdesc, flags);

err_desc_free:
	stm32_dma3_chan_desc_free(chan, swdesc);

	return NULL;
}
1465
stm32_dma3_caps(struct dma_chan * c,struct dma_slave_caps * caps)1466 static void stm32_dma3_caps(struct dma_chan *c, struct dma_slave_caps *caps)
1467 {
1468 struct stm32_dma3_chan *chan = to_stm32_dma3_chan(c);
1469
1470 if (!chan->fifo_size) {
1471 caps->max_burst = 0;
1472 caps->src_addr_widths &= ~BIT(DMA_SLAVE_BUSWIDTH_8_BYTES);
1473 caps->dst_addr_widths &= ~BIT(DMA_SLAVE_BUSWIDTH_8_BYTES);
1474 } else {
1475 /* Burst transfer should not exceed half of the fifo size */
1476 caps->max_burst = chan->max_burst;
1477 if (caps->max_burst < DMA_SLAVE_BUSWIDTH_8_BYTES) {
1478 caps->src_addr_widths &= ~BIT(DMA_SLAVE_BUSWIDTH_8_BYTES);
1479 caps->dst_addr_widths &= ~BIT(DMA_SLAVE_BUSWIDTH_8_BYTES);
1480 }
1481 }
1482 }
1483
stm32_dma3_config(struct dma_chan * c,struct dma_slave_config * config)1484 static int stm32_dma3_config(struct dma_chan *c, struct dma_slave_config *config)
1485 {
1486 struct stm32_dma3_chan *chan = to_stm32_dma3_chan(c);
1487
1488 memcpy(&chan->dma_config, config, sizeof(*config));
1489 chan->config_set |= STM32_DMA3_CFG_SET_DMA;
1490
1491 return 0;
1492 }
1493
stm32_dma3_pause(struct dma_chan * c)1494 static int stm32_dma3_pause(struct dma_chan *c)
1495 {
1496 struct stm32_dma3_chan *chan = to_stm32_dma3_chan(c);
1497 int ret;
1498
1499 ret = stm32_dma3_chan_suspend(chan, true);
1500 if (ret)
1501 return ret;
1502
1503 chan->dma_status = DMA_PAUSED;
1504
1505 dev_dbg(chan2dev(chan), "vchan %p: paused\n", &chan->vchan);
1506
1507 return 0;
1508 }
1509
stm32_dma3_resume(struct dma_chan * c)1510 static int stm32_dma3_resume(struct dma_chan *c)
1511 {
1512 struct stm32_dma3_chan *chan = to_stm32_dma3_chan(c);
1513
1514 stm32_dma3_chan_suspend(chan, false);
1515
1516 chan->dma_status = DMA_IN_PROGRESS;
1517
1518 dev_dbg(chan2dev(chan), "vchan %p: resumed\n", &chan->vchan);
1519
1520 return 0;
1521 }
1522
/*
 * stm32_dma3_terminate_all - Abort the ongoing transfer and drop all descriptors
 *
 * Marks the active descriptor (if any) for termination, stops and resets the
 * channel, then frees every descriptor still held by the virtual channel,
 * outside of the channel lock.
 */
static int stm32_dma3_terminate_all(struct dma_chan *c)
{
	struct stm32_dma3_chan *chan = to_stm32_dma3_chan(c);
	unsigned long flags;
	LIST_HEAD(head);

	spin_lock_irqsave(&chan->vchan.lock, flags);

	if (chan->swdesc) {
		vchan_terminate_vdesc(&chan->swdesc->vdesc);
		chan->swdesc = NULL;
	}

	stm32_dma3_chan_stop(chan);

	vchan_get_all_descriptors(&chan->vchan, &head);

	spin_unlock_irqrestore(&chan->vchan.lock, flags);
	vchan_dma_desc_free_list(&chan->vchan, &head);

	dev_dbg(chan2dev(chan), "vchan %p: terminated\n", &chan->vchan);

	return 0;
}
1547
/* Wait for completion of any pending descriptor callback (vchan_synchronize()). */
static void stm32_dma3_synchronize(struct dma_chan *c)
{
	struct stm32_dma3_chan *chan = to_stm32_dma3_chan(c);

	vchan_synchronize(&chan->vchan);
}
1554
/*
 * stm32_dma3_tx_status - Report transfer status and, when possible, residue
 *
 * If the cookie is not complete and the matching descriptor is found (either
 * still pending on the virtual channel or currently running), the residue and
 * in-flight bytes are computed under the channel lock.
 */
static enum dma_status stm32_dma3_tx_status(struct dma_chan *c, dma_cookie_t cookie,
					    struct dma_tx_state *txstate)
{
	struct stm32_dma3_chan *chan = to_stm32_dma3_chan(c);
	struct stm32_dma3_swdesc *swdesc = NULL;
	enum dma_status status;
	unsigned long flags;
	struct virt_dma_desc *vd;

	status = dma_cookie_status(c, cookie, txstate);
	if (status == DMA_COMPLETE)
		return status;

	/* Without a txstate there is nothing to fill in, report the channel status */
	if (!txstate)
		return chan->dma_status;

	spin_lock_irqsave(&chan->vchan.lock, flags);

	vd = vchan_find_desc(&chan->vchan, cookie);
	if (vd)
		swdesc = to_stm32_dma3_swdesc(vd);
	else if (chan->swdesc && chan->swdesc->vdesc.tx.cookie == cookie)
		swdesc = chan->swdesc;

	/* Get residue/in_flight_bytes only if a transfer is currently running (swdesc != NULL) */
	if (swdesc)
		stm32_dma3_chan_set_residue(chan, swdesc, txstate);

	spin_unlock_irqrestore(&chan->vchan.lock, flags);

	return chan->dma_status;
}
1587
stm32_dma3_issue_pending(struct dma_chan * c)1588 static void stm32_dma3_issue_pending(struct dma_chan *c)
1589 {
1590 struct stm32_dma3_chan *chan = to_stm32_dma3_chan(c);
1591 unsigned long flags;
1592
1593 spin_lock_irqsave(&chan->vchan.lock, flags);
1594
1595 if (vchan_issue_pending(&chan->vchan) && !chan->swdesc) {
1596 dev_dbg(chan2dev(chan), "vchan %p: issued\n", &chan->vchan);
1597 stm32_dma3_chan_start(chan);
1598 }
1599
1600 spin_unlock_irqrestore(&chan->vchan.lock, flags);
1601 }
1602
stm32_dma3_filter_fn(struct dma_chan * c,void * fn_param)1603 static bool stm32_dma3_filter_fn(struct dma_chan *c, void *fn_param)
1604 {
1605 struct stm32_dma3_chan *chan = to_stm32_dma3_chan(c);
1606 struct stm32_dma3_ddata *ddata = to_stm32_dma3_ddata(chan);
1607 struct stm32_dma3_dt_conf *conf = fn_param;
1608 u32 mask, semcr;
1609 int ret;
1610
1611 dev_dbg(c->device->dev, "%s(%s): req_line=%d ch_conf=%08x tr_conf=%08x\n",
1612 __func__, dma_chan_name(c), conf->req_line, conf->ch_conf, conf->tr_conf);
1613
1614 if (!of_property_read_u32(c->device->dev->of_node, "dma-channel-mask", &mask))
1615 if (!(mask & BIT(chan->id)))
1616 return false;
1617
1618 ret = pm_runtime_resume_and_get(ddata2dev(ddata));
1619 if (ret < 0)
1620 return false;
1621 semcr = readl_relaxed(ddata->base + STM32_DMA3_CSEMCR(chan->id));
1622 pm_runtime_put_sync(ddata2dev(ddata));
1623
1624 /* Check if chan is free */
1625 if (semcr & CSEMCR_SEM_MUTEX)
1626 return false;
1627
1628 /* Check if chan fifo fits well */
1629 if (FIELD_GET(STM32_DMA3_DT_FIFO, conf->ch_conf) != chan->fifo_size)
1630 return false;
1631
1632 return true;
1633 }
1634
/*
 * stm32_dma3_of_xlate - Translate a DT dma-cells specifier into a channel
 *
 * The specifier carries three cells: request line, channel configuration and
 * transfer configuration. A matching free channel is picked through
 * stm32_dma3_filter_fn() and pre-configured with the DT values.
 */
static struct dma_chan *stm32_dma3_of_xlate(struct of_phandle_args *dma_spec, struct of_dma *ofdma)
{
	struct stm32_dma3_ddata *ddata = ofdma->of_dma_data;
	dma_cap_mask_t mask = ddata->dma_dev.cap_mask;
	struct stm32_dma3_dt_conf conf;
	struct stm32_dma3_chan *chan;
	struct dma_chan *c;

	if (dma_spec->args_count < 3) {
		dev_err(ddata2dev(ddata), "Invalid args count\n");
		return NULL;
	}

	conf.req_line = dma_spec->args[0];
	conf.ch_conf = dma_spec->args[1];
	conf.tr_conf = dma_spec->args[2];

	if (conf.req_line >= ddata->dma_requests) {
		dev_err(ddata2dev(ddata), "Invalid request line\n");
		return NULL;
	}

	/* Request dma channel among the generic dma controller list */
	c = dma_request_channel(mask, stm32_dma3_filter_fn, &conf);
	if (!c) {
		dev_err(ddata2dev(ddata), "No suitable channel found\n");
		return NULL;
	}

	chan = to_stm32_dma3_chan(c);
	chan->dt_config = conf;
	chan->config_set |= STM32_DMA3_CFG_SET_DT;

	return c;
}
1670
/*
 * stm32_dma3_check_rif - Compute the bitmask of channels this OS must not use
 *
 * A channel is reserved when it is secure, when it is not CID-filtered and not
 * listed in dma-channel-mask, when it is statically assigned to a CID other
 * than CID1, or when CID1 is not white-listed in semaphore mode. Channels
 * found in semaphore mode are flagged in ddata for later semaphore handling.
 */
static u32 stm32_dma3_check_rif(struct stm32_dma3_ddata *ddata)
{
	struct device *dev = ddata2dev(ddata);
	u32 chan_reserved, mask = 0, i, ccidcfgr, invalid_cid = 0;

	/* Reserve Secure channels */
	chan_reserved = readl_relaxed(ddata->base + STM32_DMA3_SECCFGR);

	/*
	 * CID filtering must be configured to ensure that the DMA3 channel will inherit the CID of
	 * the processor which is configuring and using the given channel.
	 * In case CID filtering is not configured, dma-channel-mask property can be used to
	 * specify available DMA channels to the kernel.
	 */
	of_property_read_u32(dev->of_node, "dma-channel-mask", &mask);

	/* Reserve !CID-filtered not in dma-channel-mask, static CID != CID1, CID1 not allowed */
	for (i = 0; i < ddata->dma_channels; i++) {
		ccidcfgr = readl_relaxed(ddata->base + STM32_DMA3_CCIDCFGR(i));

		if (!(ccidcfgr & CCIDCFGR_CFEN)) { /* !CID-filtered */
			invalid_cid |= BIT(i);
			if (!(mask & BIT(i))) /* Not in dma-channel-mask */
				chan_reserved |= BIT(i);
		} else { /* CID-filtered */
			if (!(ccidcfgr & CCIDCFGR_SEM_EN)) { /* Static CID mode */
				if (FIELD_GET(CCIDCFGR_SCID, ccidcfgr) != CCIDCFGR_CID1)
					chan_reserved |= BIT(i);
			} else { /* Semaphore mode */
				if (!FIELD_GET(CCIDCFGR_SEM_WLIST_CID1, ccidcfgr))
					chan_reserved |= BIT(i);
				ddata->chans[i].semaphore_mode = true;
			}
		}
		dev_dbg(dev, "chan%d: %s mode, %s\n", i,
			!(ccidcfgr & CCIDCFGR_CFEN) ? "!CID-filtered" :
			ddata->chans[i].semaphore_mode ? "Semaphore" : "Static CID",
			(chan_reserved & BIT(i)) ? "denied" :
			mask & BIT(i) ? "force allowed" : "allowed");
	}

	if (invalid_cid)
		dev_warn(dev, "chan%*pbl have invalid CID configuration\n",
			 ddata->dma_channels, &invalid_cid);

	return chan_reserved;
}
1718
/* STM32MP25 platform data: cap AXI bursts to 16 beats */
static struct stm32_dma3_pdata stm32mp25_pdata = {
	.axi_max_burst_len = 16,
};
1722
/* Device-tree match table, with per-SoC platform data */
static const struct of_device_id stm32_dma3_of_match[] = {
	{ .compatible = "st,stm32mp25-dma3", .data = &stm32mp25_pdata, },
	{ /* sentinel */ },
};
MODULE_DEVICE_TABLE(of, stm32_dma3_of_match);
1728
stm32_dma3_probe(struct platform_device * pdev)1729 static int stm32_dma3_probe(struct platform_device *pdev)
1730 {
1731 struct device_node *np = pdev->dev.of_node;
1732 const struct stm32_dma3_pdata *pdata;
1733 struct stm32_dma3_ddata *ddata;
1734 struct reset_control *reset;
1735 struct stm32_dma3_chan *chan;
1736 struct dma_device *dma_dev;
1737 u32 master_ports, chan_reserved, i, verr;
1738 u64 hwcfgr;
1739 int ret;
1740
1741 ddata = devm_kzalloc(&pdev->dev, sizeof(*ddata), GFP_KERNEL);
1742 if (!ddata)
1743 return -ENOMEM;
1744 platform_set_drvdata(pdev, ddata);
1745
1746 dma_dev = &ddata->dma_dev;
1747
1748 ddata->base = devm_platform_ioremap_resource(pdev, 0);
1749 if (IS_ERR(ddata->base))
1750 return PTR_ERR(ddata->base);
1751
1752 ddata->clk = devm_clk_get(&pdev->dev, NULL);
1753 if (IS_ERR(ddata->clk))
1754 return dev_err_probe(&pdev->dev, PTR_ERR(ddata->clk), "Failed to get clk\n");
1755
1756 reset = devm_reset_control_get_optional(&pdev->dev, NULL);
1757 if (IS_ERR(reset))
1758 return dev_err_probe(&pdev->dev, PTR_ERR(reset), "Failed to get reset\n");
1759
1760 ret = clk_prepare_enable(ddata->clk);
1761 if (ret)
1762 return dev_err_probe(&pdev->dev, ret, "Failed to enable clk\n");
1763
1764 reset_control_reset(reset);
1765
1766 INIT_LIST_HEAD(&dma_dev->channels);
1767
1768 dma_cap_set(DMA_SLAVE, dma_dev->cap_mask);
1769 dma_cap_set(DMA_PRIVATE, dma_dev->cap_mask);
1770 dma_cap_set(DMA_CYCLIC, dma_dev->cap_mask);
1771 dma_cap_set(DMA_MEMCPY, dma_dev->cap_mask);
1772 dma_dev->dev = &pdev->dev;
1773 /*
1774 * This controller supports up to 8-byte buswidth depending on the port used and the
1775 * channel, and can only access address at even boundaries, multiple of the buswidth.
1776 */
1777 dma_dev->copy_align = DMAENGINE_ALIGN_8_BYTES;
1778 dma_dev->src_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) |
1779 BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) |
1780 BIT(DMA_SLAVE_BUSWIDTH_4_BYTES) |
1781 BIT(DMA_SLAVE_BUSWIDTH_8_BYTES);
1782 dma_dev->dst_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) |
1783 BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) |
1784 BIT(DMA_SLAVE_BUSWIDTH_4_BYTES) |
1785 BIT(DMA_SLAVE_BUSWIDTH_8_BYTES);
1786 dma_dev->directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV) | BIT(DMA_MEM_TO_MEM);
1787
1788 dma_dev->descriptor_reuse = true;
1789 dma_dev->max_sg_burst = STM32_DMA3_MAX_SEG_SIZE;
1790 dma_dev->residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;
1791 dma_dev->device_alloc_chan_resources = stm32_dma3_alloc_chan_resources;
1792 dma_dev->device_free_chan_resources = stm32_dma3_free_chan_resources;
1793 dma_dev->device_prep_dma_memcpy = stm32_dma3_prep_dma_memcpy;
1794 dma_dev->device_prep_slave_sg = stm32_dma3_prep_slave_sg;
1795 dma_dev->device_prep_dma_cyclic = stm32_dma3_prep_dma_cyclic;
1796 dma_dev->device_caps = stm32_dma3_caps;
1797 dma_dev->device_config = stm32_dma3_config;
1798 dma_dev->device_pause = stm32_dma3_pause;
1799 dma_dev->device_resume = stm32_dma3_resume;
1800 dma_dev->device_terminate_all = stm32_dma3_terminate_all;
1801 dma_dev->device_synchronize = stm32_dma3_synchronize;
1802 dma_dev->device_tx_status = stm32_dma3_tx_status;
1803 dma_dev->device_issue_pending = stm32_dma3_issue_pending;
1804
1805 /* if dma_channels is not modified, get it from hwcfgr1 */
1806 if (of_property_read_u32(np, "dma-channels", &ddata->dma_channels)) {
1807 hwcfgr = readl_relaxed(ddata->base + STM32_DMA3_HWCFGR1);
1808 ddata->dma_channels = FIELD_GET(G_NUM_CHANNELS, hwcfgr);
1809 }
1810
1811 /* if dma_requests is not modified, get it from hwcfgr2 */
1812 if (of_property_read_u32(np, "dma-requests", &ddata->dma_requests)) {
1813 hwcfgr = readl_relaxed(ddata->base + STM32_DMA3_HWCFGR2);
1814 ddata->dma_requests = FIELD_GET(G_MAX_REQ_ID, hwcfgr) + 1;
1815 }
1816
1817 /* G_MASTER_PORTS, G_M0_DATA_WIDTH_ENC, G_M1_DATA_WIDTH_ENC in HWCFGR1 */
1818 hwcfgr = readl_relaxed(ddata->base + STM32_DMA3_HWCFGR1);
1819 master_ports = FIELD_GET(G_MASTER_PORTS, hwcfgr);
1820
1821 ddata->ports_max_dw[0] = FIELD_GET(G_M0_DATA_WIDTH_ENC, hwcfgr);
1822 if (master_ports == AXI64 || master_ports == AHB32) /* Single master port */
1823 ddata->ports_max_dw[1] = DW_INVALID;
1824 else /* Dual master ports */
1825 ddata->ports_max_dw[1] = FIELD_GET(G_M1_DATA_WIDTH_ENC, hwcfgr);
1826
1827 /* axi_max_burst_len is optional, if not defined, use STM32_DMA3_MAX_BURST_LEN */
1828 ddata->axi_max_burst_len = STM32_DMA3_MAX_BURST_LEN;
1829 pdata = device_get_match_data(&pdev->dev);
1830 if (pdata && pdata->axi_max_burst_len) {
1831 ddata->axi_max_burst_len = min_t(u32, pdata->axi_max_burst_len,
1832 STM32_DMA3_MAX_BURST_LEN);
1833 dev_dbg(&pdev->dev, "Burst is limited to %u beats through AXI port\n",
1834 ddata->axi_max_burst_len);
1835 }
1836
1837 ddata->chans = devm_kcalloc(&pdev->dev, ddata->dma_channels, sizeof(*ddata->chans),
1838 GFP_KERNEL);
1839 if (!ddata->chans) {
1840 ret = -ENOMEM;
1841 goto err_clk_disable;
1842 }
1843
1844 chan_reserved = stm32_dma3_check_rif(ddata);
1845
1846 if (chan_reserved == GENMASK(ddata->dma_channels - 1, 0)) {
1847 ret = -ENODEV;
1848 dev_err_probe(&pdev->dev, ret, "No channel available, abort registration\n");
1849 goto err_clk_disable;
1850 }
1851
1852 /* G_FIFO_SIZE x=0..7 in HWCFGR3 and G_FIFO_SIZE x=8..15 in HWCFGR4 */
1853 hwcfgr = readl_relaxed(ddata->base + STM32_DMA3_HWCFGR3);
1854 hwcfgr |= ((u64)readl_relaxed(ddata->base + STM32_DMA3_HWCFGR4)) << 32;
1855
1856 for (i = 0; i < ddata->dma_channels; i++) {
1857 if (chan_reserved & BIT(i))
1858 continue;
1859
1860 chan = &ddata->chans[i];
1861 chan->id = i;
1862 chan->fifo_size = get_chan_hwcfg(i, G_FIFO_SIZE(i), hwcfgr);
1863 /* If chan->fifo_size > 0 then half of the fifo size, else no burst when no FIFO */
1864 chan->max_burst = (chan->fifo_size) ? (1 << (chan->fifo_size + 1)) / 2 : 0;
1865 }
1866
1867 ret = dmaenginem_async_device_register(dma_dev);
1868 if (ret)
1869 goto err_clk_disable;
1870
1871 for (i = 0; i < ddata->dma_channels; i++) {
1872 char name[12];
1873
1874 if (chan_reserved & BIT(i))
1875 continue;
1876
1877 chan = &ddata->chans[i];
1878 snprintf(name, sizeof(name), "dma%dchan%d", ddata->dma_dev.dev_id, chan->id);
1879
1880 chan->vchan.desc_free = stm32_dma3_chan_vdesc_free;
1881 vchan_init(&chan->vchan, dma_dev);
1882
1883 ret = dma_async_device_channel_register(&ddata->dma_dev, &chan->vchan.chan, name);
1884 if (ret) {
1885 dev_err_probe(&pdev->dev, ret, "Failed to register channel %s\n", name);
1886 goto err_clk_disable;
1887 }
1888
1889 ret = platform_get_irq(pdev, i);
1890 if (ret < 0)
1891 goto err_clk_disable;
1892 chan->irq = ret;
1893
1894 ret = devm_request_irq(&pdev->dev, chan->irq, stm32_dma3_chan_irq, 0,
1895 dev_name(chan2dev(chan)), chan);
1896 if (ret) {
1897 dev_err_probe(&pdev->dev, ret, "Failed to request channel %s IRQ\n",
1898 dev_name(chan2dev(chan)));
1899 goto err_clk_disable;
1900 }
1901 }
1902
1903 ret = of_dma_controller_register(np, stm32_dma3_of_xlate, ddata);
1904 if (ret) {
1905 dev_err_probe(&pdev->dev, ret, "Failed to register controller\n");
1906 goto err_clk_disable;
1907 }
1908
1909 verr = readl_relaxed(ddata->base + STM32_DMA3_VERR);
1910
1911 pm_runtime_set_active(&pdev->dev);
1912 pm_runtime_enable(&pdev->dev);
1913 pm_runtime_get_noresume(&pdev->dev);
1914 pm_runtime_put(&pdev->dev);
1915
1916 dev_info(&pdev->dev, "STM32 DMA3 registered rev:%lu.%lu\n",
1917 FIELD_GET(VERR_MAJREV, verr), FIELD_GET(VERR_MINREV, verr));
1918
1919 return 0;
1920
1921 err_clk_disable:
1922 clk_disable_unprepare(ddata->clk);
1923
1924 return ret;
1925 }
1926
stm32_dma3_remove(struct platform_device * pdev)1927 static void stm32_dma3_remove(struct platform_device *pdev)
1928 {
1929 pm_runtime_disable(&pdev->dev);
1930 }
1931
stm32_dma3_runtime_suspend(struct device * dev)1932 static int stm32_dma3_runtime_suspend(struct device *dev)
1933 {
1934 struct stm32_dma3_ddata *ddata = dev_get_drvdata(dev);
1935
1936 clk_disable_unprepare(ddata->clk);
1937
1938 return 0;
1939 }
1940
stm32_dma3_runtime_resume(struct device * dev)1941 static int stm32_dma3_runtime_resume(struct device *dev)
1942 {
1943 struct stm32_dma3_ddata *ddata = dev_get_drvdata(dev);
1944 int ret;
1945
1946 ret = clk_prepare_enable(ddata->clk);
1947 if (ret)
1948 dev_err(dev, "Failed to enable clk: %d\n", ret);
1949
1950 return ret;
1951 }
1952
stm32_dma3_pm_suspend(struct device * dev)1953 static int stm32_dma3_pm_suspend(struct device *dev)
1954 {
1955 struct stm32_dma3_ddata *ddata = dev_get_drvdata(dev);
1956 struct dma_device *dma_dev = &ddata->dma_dev;
1957 struct dma_chan *c;
1958 int ccr, ret;
1959
1960 ret = pm_runtime_resume_and_get(dev);
1961 if (ret < 0)
1962 return ret;
1963
1964 list_for_each_entry(c, &dma_dev->channels, device_node) {
1965 struct stm32_dma3_chan *chan = to_stm32_dma3_chan(c);
1966
1967 ccr = readl_relaxed(ddata->base + STM32_DMA3_CCR(chan->id));
1968 if (ccr & CCR_EN) {
1969 dev_warn(dev, "Suspend is prevented: %s still in use by %s\n",
1970 dma_chan_name(c), dev_name(c->slave));
1971 pm_runtime_put_sync(dev);
1972 return -EBUSY;
1973 }
1974 }
1975
1976 pm_runtime_put_sync(dev);
1977
1978 pm_runtime_force_suspend(dev);
1979
1980 return 0;
1981 }
1982
stm32_dma3_pm_resume(struct device * dev)1983 static int stm32_dma3_pm_resume(struct device *dev)
1984 {
1985 struct stm32_dma3_ddata *ddata = dev_get_drvdata(dev);
1986 struct dma_device *dma_dev = &ddata->dma_dev;
1987 struct dma_chan *c;
1988 int ret;
1989
1990 ret = pm_runtime_force_resume(dev);
1991 if (ret < 0)
1992 return ret;
1993
1994 ret = pm_runtime_resume_and_get(dev);
1995 if (ret < 0)
1996 return ret;
1997
1998 /*
1999 * Channel semaphores need to be restored in case of registers reset during low power.
2000 * stm32_dma3_get_chan_sem() will prior check the semaphore status.
2001 */
2002 list_for_each_entry(c, &dma_dev->channels, device_node) {
2003 struct stm32_dma3_chan *chan = to_stm32_dma3_chan(c);
2004
2005 if (chan->semaphore_mode && chan->semaphore_taken)
2006 stm32_dma3_get_chan_sem(chan);
2007 }
2008
2009 pm_runtime_put_sync(dev);
2010
2011 return 0;
2012 }
2013
/*
 * PM callbacks: system sleep ops guard against suspending with active
 * channels and restore channel semaphores on resume; runtime ops gate
 * the controller clock.
 */
static const struct dev_pm_ops stm32_dma3_pm_ops = {
	SYSTEM_SLEEP_PM_OPS(stm32_dma3_pm_suspend, stm32_dma3_pm_resume)
	RUNTIME_PM_OPS(stm32_dma3_runtime_suspend, stm32_dma3_runtime_resume, NULL)
};
2018
static struct platform_driver stm32_dma3_driver = {
	.probe = stm32_dma3_probe,
	.remove = stm32_dma3_remove,
	.driver = {
		.name = "stm32-dma3",
		.of_match_table = stm32_dma3_of_match,
		/* pm_ptr() discards the ops when CONFIG_PM is not set */
		.pm = pm_ptr(&stm32_dma3_pm_ops),
	},
};
2028
/* Standard module registration: generates module init/exit for the driver */
module_platform_driver(stm32_dma3_driver);

MODULE_DESCRIPTION("STM32 DMA3 controller driver");
MODULE_AUTHOR("Amelie Delaunay <amelie.delaunay@foss.st.com>");
MODULE_LICENSE("GPL");
2034