1 // SPDX-License-Identifier: GPL-2.0
2 /*
3 * Lightning Mountain centralized DMA controller driver
4 *
5 * Copyright (c) 2016 - 2020 Intel Corporation.
6 */
7
8 #include <linux/bitfield.h>
9 #include <linux/clk.h>
10 #include <linux/dma-mapping.h>
11 #include <linux/dmapool.h>
12 #include <linux/err.h>
13 #include <linux/export.h>
14 #include <linux/init.h>
15 #include <linux/interrupt.h>
16 #include <linux/iopoll.h>
17 #include <linux/of_dma.h>
18 #include <linux/of_irq.h>
19 #include <linux/platform_device.h>
20 #include <linux/reset.h>
21
22 #include "../dmaengine.h"
23 #include "../virt-dma.h"
24
25 #define DRIVER_NAME "lgm-dma"
26
27 #define DMA_ID 0x0008
28 #define DMA_ID_REV GENMASK(7, 0)
29 #define DMA_ID_PNR GENMASK(19, 16)
30 #define DMA_ID_CHNR GENMASK(26, 20)
31 #define DMA_ID_DW_128B BIT(27)
32 #define DMA_ID_AW_36B BIT(28)
33 #define DMA_VER32 0x32
34 #define DMA_VER31 0x31
35 #define DMA_VER22 0x0A
36
37 #define DMA_CTRL 0x0010
38 #define DMA_CTRL_RST BIT(0)
39 #define DMA_CTRL_DSRAM_PATH BIT(1)
40 #define DMA_CTRL_DBURST_WR BIT(3)
41 #define DMA_CTRL_VLD_DF_ACK BIT(4)
42 #define DMA_CTRL_CH_FL BIT(6)
43 #define DMA_CTRL_DS_FOD BIT(7)
44 #define DMA_CTRL_DRB BIT(8)
45 #define DMA_CTRL_ENBE BIT(9)
46 #define DMA_CTRL_DESC_TMOUT_CNT_V31 GENMASK(27, 16)
47 #define DMA_CTRL_DESC_TMOUT_EN_V31 BIT(30)
48 #define DMA_CTRL_PKTARB BIT(31)
49
50 #define DMA_CPOLL 0x0014
51 #define DMA_CPOLL_CNT GENMASK(15, 4)
52 #define DMA_CPOLL_EN BIT(31)
53
54 #define DMA_CS 0x0018
55 #define DMA_CS_MASK GENMASK(5, 0)
56
57 #define DMA_CCTRL 0x001C
58 #define DMA_CCTRL_ON BIT(0)
59 #define DMA_CCTRL_RST BIT(1)
60 #define DMA_CCTRL_CH_POLL_EN BIT(2)
61 #define DMA_CCTRL_CH_ABC BIT(3) /* Adaptive Burst Chop */
62 #define DMA_CDBA_MSB GENMASK(7, 4)
63 #define DMA_CCTRL_DIR_TX BIT(8)
64 #define DMA_CCTRL_CLASS GENMASK(11, 9)
65 #define DMA_CCTRL_CLASSH GENMASK(19, 18)
66 #define DMA_CCTRL_WR_NP_EN BIT(21)
67 #define DMA_CCTRL_PDEN BIT(23)
68 #define DMA_MAX_CLASS (SZ_32 - 1)
69
70 #define DMA_CDBA 0x0020
71 #define DMA_CDLEN 0x0024
72 #define DMA_CIS 0x0028
73 #define DMA_CIE 0x002C
74 #define DMA_CI_EOP BIT(1)
75 #define DMA_CI_DUR BIT(2)
76 #define DMA_CI_DESCPT BIT(3)
77 #define DMA_CI_CHOFF BIT(4)
78 #define DMA_CI_RDERR BIT(5)
79 #define DMA_CI_ALL \
80 (DMA_CI_EOP | DMA_CI_DUR | DMA_CI_DESCPT | DMA_CI_CHOFF | DMA_CI_RDERR)
81
82 #define DMA_PS 0x0040
83 #define DMA_PCTRL 0x0044
84 #define DMA_PCTRL_RXBL16 BIT(0)
85 #define DMA_PCTRL_TXBL16 BIT(1)
86 #define DMA_PCTRL_RXBL GENMASK(3, 2)
87 #define DMA_PCTRL_RXBL_8 3
88 #define DMA_PCTRL_TXBL GENMASK(5, 4)
89 #define DMA_PCTRL_TXBL_8 3
90 #define DMA_PCTRL_PDEN BIT(6)
91 #define DMA_PCTRL_RXBL32 BIT(7)
92 #define DMA_PCTRL_RXENDI GENMASK(9, 8)
93 #define DMA_PCTRL_TXENDI GENMASK(11, 10)
94 #define DMA_PCTRL_TXBL32 BIT(15)
95 #define DMA_PCTRL_MEM_FLUSH BIT(16)
96
97 #define DMA_IRNEN1 0x00E8
98 #define DMA_IRNCR1 0x00EC
99 #define DMA_IRNEN 0x00F4
100 #define DMA_IRNCR 0x00F8
101 #define DMA_C_DP_TICK 0x100
102 #define DMA_C_DP_TICK_TIKNARB GENMASK(15, 0)
103 #define DMA_C_DP_TICK_TIKARB GENMASK(31, 16)
104
105 #define DMA_C_HDRM 0x110
106 /*
107 * If header mode is set in DMA descriptor,
108 * If bit 30 is disabled, HDR_LEN must be configured according to channel
109 * requirement.
110 * If bit 30 is enabled(checksum with header mode), HDR_LEN has no need to
111 * be configured. It will enable check sum for switch
112 * If header mode is not set in DMA descriptor,
113 * This register setting doesn't matter
114 */
115 #define DMA_C_HDRM_HDR_SUM BIT(30)
116
117 #define DMA_C_BOFF 0x120
118 #define DMA_C_BOFF_BOF_LEN GENMASK(7, 0)
119 #define DMA_C_BOFF_EN BIT(31)
120
121 #define DMA_ORRC 0x190
122 #define DMA_ORRC_ORRCNT GENMASK(8, 4)
123 #define DMA_ORRC_EN BIT(31)
124
125 #define DMA_C_ENDIAN 0x200
126 #define DMA_C_END_DATAENDI GENMASK(1, 0)
127 #define DMA_C_END_DE_EN BIT(7)
128 #define DMA_C_END_DESENDI GENMASK(9, 8)
129 #define DMA_C_END_DES_EN BIT(16)
130
131 /* DMA controller capability */
132 #define DMA_ADDR_36BIT BIT(0)
133 #define DMA_DATA_128BIT BIT(1)
134 #define DMA_CHAN_FLOW_CTL BIT(2)
135 #define DMA_DESC_FOD BIT(3)
136 #define DMA_DESC_IN_SRAM BIT(4)
137 #define DMA_EN_BYTE_EN BIT(5)
138 #define DMA_DBURST_WR BIT(6)
139 #define DMA_VALID_DESC_FETCH_ACK BIT(7)
140 #define DMA_DFT_DRB BIT(8)
141
142 #define DMA_ORRC_MAX_CNT (SZ_32 - 1)
143 #define DMA_DFT_POLL_CNT SZ_4
144 #define DMA_DFT_BURST_V22 SZ_2
145 #define DMA_BURSTL_8DW SZ_8
146 #define DMA_BURSTL_16DW SZ_16
147 #define DMA_BURSTL_32DW SZ_32
148 #define DMA_DFT_BURST DMA_BURSTL_16DW
149 #define DMA_MAX_DESC_NUM (SZ_8K - 1)
150 #define DMA_CHAN_BOFF_MAX (SZ_256 - 1)
151 #define DMA_DFT_ENDIAN 0
152
153 #define DMA_DFT_DESC_TCNT 50
154 #define DMA_HDR_LEN_MAX (SZ_16K - 1)
155
156 /* DMA flags */
157 #define DMA_TX_CH BIT(0)
158 #define DMA_RX_CH BIT(1)
159 #define DEVICE_ALLOC_DESC BIT(2)
160 #define CHAN_IN_USE BIT(3)
161 #define DMA_HW_DESC BIT(4)
162
163 /* Descriptor fields */
164 #define DESC_DATA_LEN GENMASK(15, 0)
165 #define DESC_BYTE_OFF GENMASK(25, 23)
166 #define DESC_EOP BIT(28)
167 #define DESC_SOP BIT(29)
168 #define DESC_C BIT(30)
169 #define DESC_OWN BIT(31)
170
171 #define DMA_CHAN_RST 1
172 #define DMA_MAX_SIZE (BIT(16) - 1)
173 #define MAX_LOWER_CHANS 32
174 #define MASK_LOWER_CHANS GENMASK(4, 0)
175 #define DMA_OWN 1
176 #define HIGH_4_BITS GENMASK(3, 0)
177 #define DMA_DFT_DESC_NUM 1
178 #define DMA_PKT_DROP_DIS 0
179
/* Channel run state as mirrored in software (DMA_CCTRL_ON). */
enum ldma_chan_on_off {
	DMA_CH_OFF = 0,
	DMA_CH_ON = 1,
};

/* Controller instance flavor, fixed per ldma_inst_data. */
enum {
	DMA_TYPE_TX = 0,
	DMA_TYPE_RX,
	DMA_TYPE_MCPY,
};
190
191 struct ldma_dev;
192 struct ldma_port;
193
/* Per-channel state, embedding the virt-dma channel. */
struct ldma_chan {
	struct virt_dma_chan vchan;
	struct ldma_port *port; /* back pointer */
	char name[8]; /* Channel name */
	int nr; /* Channel id in hardware */
	u32 flags; /* central way or channel based way */
	enum ldma_chan_on_off onoff;
	dma_addr_t desc_phys;
	void *desc_base; /* Virtual address */
	u32 desc_cnt; /* Number of descriptors */
	int rst;
	u32 hdrm_len;
	bool hdrm_csum;
	u32 boff_len;
	u32 data_endian;
	u32 desc_endian;
	bool pden;
	bool desc_rx_np;
	bool data_endian_en;
	bool desc_endian_en;
	bool abc_en;	/* Adaptive Burst Chop, v3.2+ only */
	bool desc_init;	/* true once descriptor base/len programmed */
	struct dma_pool *desc_pool; /* Descriptors pool */
	u32 desc_num;
	struct dw2_desc_sw *ds;
	struct work_struct work;
	struct dma_slave_config config;
};
222
/* Per-port configuration programmed via DMA_PS/DMA_PCTRL. */
struct ldma_port {
	struct ldma_dev *ldev; /* back pointer */
	u32 portid;
	u32 rxbl;	/* RX burst length */
	u32 txbl;	/* TX burst length */
	u32 rxendi;	/* RX endianness selector */
	u32 txendi;	/* TX endianness selector */
	u32 pkt_drop;
};
232
233 /* Instance specific data */
/* Instance specific data */
struct ldma_inst_data {
	bool desc_in_sram;
	bool chan_fc;	/* Channel flow control (TX instances) */
	bool desc_fod; /* Fetch On Demand */
	bool valid_desc_fetch_ack;
	u32 orrc; /* Outstanding read count */
	const char *name;
	u32 type;	/* DMA_TYPE_TX / DMA_TYPE_RX / DMA_TYPE_MCPY */
};
243
/* One DMA controller instance. */
struct ldma_dev {
	struct device *dev;
	void __iomem *base;
	struct reset_control *rst;
	struct clk *core_clk;
	struct dma_device dma_dev;
	u32 ver;	/* Hardware revision from DMA_ID */
	int irq;
	struct ldma_port *ports;
	struct ldma_chan *chans; /* channel list on this DMA or port */
	spinlock_t dev_lock; /* Controller register exclusive */
	u32 chan_nrs;
	u32 port_nrs;
	u32 channels_mask;
	u32 flags;	/* DMA controller capability bits */
	u32 pollcnt;
	const struct ldma_inst_data *inst;
	struct workqueue_struct *wq;
};
263
/* 2-word (8-byte) hardware descriptor: control field + buffer address. */
struct dw2_desc {
	u32 field;
	u32 addr;
} __packed __aligned(8);
268
/* Software wrapper for a run of hardware descriptors. */
struct dw2_desc_sw {
	struct virt_dma_desc vdesc;
	struct ldma_chan *chan;
	dma_addr_t desc_phys;	/* DMA address of desc_hw */
	size_t desc_cnt;
	size_t size;
	struct dw2_desc *desc_hw;
};
277
278 static inline void
ldma_update_bits(struct ldma_dev * d,u32 mask,u32 val,u32 ofs)279 ldma_update_bits(struct ldma_dev *d, u32 mask, u32 val, u32 ofs)
280 {
281 u32 old_val, new_val;
282
283 old_val = readl(d->base + ofs);
284 new_val = (old_val & ~mask) | (val & mask);
285
286 if (new_val != old_val)
287 writel(new_val, d->base + ofs);
288 }
289
/* Map a generic dma_chan back to its ldma_chan container. */
static inline struct ldma_chan *to_ldma_chan(struct dma_chan *chan)
{
	return container_of(chan, struct ldma_chan, vchan.chan);
}
294
/* Map a generic dma_device back to its ldma_dev container. */
static inline struct ldma_dev *to_ldma_dev(struct dma_device *dma_dev)
{
	return container_of(dma_dev, struct ldma_dev, dma_dev);
}
299
/* Map a virt-dma descriptor back to its dw2_desc_sw container. */
static inline struct dw2_desc_sw *to_lgm_dma_desc(struct virt_dma_desc *vdesc)
{
	return container_of(vdesc, struct dw2_desc_sw, vdesc);
}
304
ldma_chan_tx(struct ldma_chan * c)305 static inline bool ldma_chan_tx(struct ldma_chan *c)
306 {
307 return !!(c->flags & DMA_TX_CH);
308 }
309
ldma_chan_is_hw_desc(struct ldma_chan * c)310 static inline bool ldma_chan_is_hw_desc(struct ldma_chan *c)
311 {
312 return !!(c->flags & DMA_HW_DESC);
313 }
314
/* Issue a global software reset of the whole controller (DMA_CTRL_RST). */
static void ldma_dev_reset(struct ldma_dev *d)

{
	unsigned long flags;

	spin_lock_irqsave(&d->dev_lock, flags);
	ldma_update_bits(d, DMA_CTRL_RST, DMA_CTRL_RST, DMA_CTRL);
	spin_unlock_irqrestore(&d->dev_lock, flags);
}
324
/* Enable/disable packet arbitration in the global control register. */
static void ldma_dev_pkt_arb_cfg(struct ldma_dev *d, bool enable)
{
	unsigned long flags;

	spin_lock_irqsave(&d->dev_lock, flags);
	ldma_update_bits(d, DMA_CTRL_PKTARB,
			 enable ? DMA_CTRL_PKTARB : 0, DMA_CTRL);
	spin_unlock_irqrestore(&d->dev_lock, flags);
}
335
/* Route descriptor fetches through SRAM when enabled (DMA_CTRL_DSRAM_PATH). */
static void ldma_dev_sram_desc_cfg(struct ldma_dev *d, bool enable)
{
	unsigned long flags;

	spin_lock_irqsave(&d->dev_lock, flags);
	ldma_update_bits(d, DMA_CTRL_DSRAM_PATH,
			 enable ? DMA_CTRL_DSRAM_PATH : 0, DMA_CTRL);
	spin_unlock_irqrestore(&d->dev_lock, flags);
}
346
/* Channel flow control is a TX-instance-only feature; no-op otherwise. */
static void ldma_dev_chan_flow_ctl_cfg(struct ldma_dev *d, bool enable)
{
	unsigned long flags;

	if (d->inst->type != DMA_TYPE_TX)
		return;

	spin_lock_irqsave(&d->dev_lock, flags);
	ldma_update_bits(d, DMA_CTRL_CH_FL,
			 enable ? DMA_CTRL_CH_FL : 0, DMA_CTRL);
	spin_unlock_irqrestore(&d->dev_lock, flags);
}
362
ldma_dev_global_polling_enable(struct ldma_dev * d)363 static void ldma_dev_global_polling_enable(struct ldma_dev *d)
364 {
365 unsigned long flags;
366 u32 mask = DMA_CPOLL_EN | DMA_CPOLL_CNT;
367 u32 val = DMA_CPOLL_EN;
368
369 val |= FIELD_PREP(DMA_CPOLL_CNT, d->pollcnt);
370
371 spin_lock_irqsave(&d->dev_lock, flags);
372 ldma_update_bits(d, mask, val, DMA_CPOLL);
373 spin_unlock_irqrestore(&d->dev_lock, flags);
374 }
375
/* Fetch-on-demand does not apply to the memcopy instance; no-op there. */
static void ldma_dev_desc_fetch_on_demand_cfg(struct ldma_dev *d, bool enable)
{
	unsigned long flags;

	if (d->inst->type == DMA_TYPE_MCPY)
		return;

	spin_lock_irqsave(&d->dev_lock, flags);
	ldma_update_bits(d, DMA_CTRL_DS_FOD,
			 enable ? DMA_CTRL_DS_FOD : 0, DMA_CTRL);
	spin_unlock_irqrestore(&d->dev_lock, flags);
}
391
/* Enable/disable byte-enable support (DMA_CTRL_ENBE). */
static void ldma_dev_byte_enable_cfg(struct ldma_dev *d, bool enable)
{
	unsigned long flags;

	spin_lock_irqsave(&d->dev_lock, flags);
	ldma_update_bits(d, DMA_CTRL_ENBE,
			 enable ? DMA_CTRL_ENBE : 0, DMA_CTRL);
	spin_unlock_irqrestore(&d->dev_lock, flags);
}
402
ldma_dev_orrc_cfg(struct ldma_dev * d)403 static void ldma_dev_orrc_cfg(struct ldma_dev *d)
404 {
405 unsigned long flags;
406 u32 val = 0;
407 u32 mask;
408
409 if (d->inst->type == DMA_TYPE_RX)
410 return;
411
412 mask = DMA_ORRC_EN | DMA_ORRC_ORRCNT;
413 if (d->inst->orrc > 0 && d->inst->orrc <= DMA_ORRC_MAX_CNT)
414 val = DMA_ORRC_EN | FIELD_PREP(DMA_ORRC_ORRCNT, d->inst->orrc);
415
416 spin_lock_irqsave(&d->dev_lock, flags);
417 ldma_update_bits(d, mask, val, DMA_ORRC);
418 spin_unlock_irqrestore(&d->dev_lock, flags);
419 }
420
/* Configure the v3.1+ descriptor fetch timeout counter. */
static void ldma_dev_df_tout_cfg(struct ldma_dev *d, bool enable, int tcnt)
{
	unsigned long flags;
	u32 val = 0;

	if (enable)
		val = DMA_CTRL_DESC_TMOUT_EN_V31 |
		      FIELD_PREP(DMA_CTRL_DESC_TMOUT_CNT_V31, tcnt);

	spin_lock_irqsave(&d->dev_lock, flags);
	ldma_update_bits(d, DMA_CTRL_DESC_TMOUT_CNT_V31, val, DMA_CTRL);
	spin_unlock_irqrestore(&d->dev_lock, flags);
}
436
/* Dynamic burst write applies only to RX and memcopy instances. */
static void ldma_dev_dburst_wr_cfg(struct ldma_dev *d, bool enable)
{
	unsigned long flags;

	if (d->inst->type != DMA_TYPE_RX && d->inst->type != DMA_TYPE_MCPY)
		return;

	spin_lock_irqsave(&d->dev_lock, flags);
	ldma_update_bits(d, DMA_CTRL_DBURST_WR,
			 enable ? DMA_CTRL_DBURST_WR : 0, DMA_CTRL);
	spin_unlock_irqrestore(&d->dev_lock, flags);
}
452
/* Valid descriptor fetch ack is a TX-instance-only feature. */
static void ldma_dev_vld_fetch_ack_cfg(struct ldma_dev *d, bool enable)
{
	unsigned long flags;

	if (d->inst->type != DMA_TYPE_TX)
		return;

	spin_lock_irqsave(&d->dev_lock, flags);
	ldma_update_bits(d, DMA_CTRL_VLD_DF_ACK,
			 enable ? DMA_CTRL_VLD_DF_ACK : 0, DMA_CTRL);
	spin_unlock_irqrestore(&d->dev_lock, flags);
}
468
ldma_dev_drb_cfg(struct ldma_dev * d,int enable)469 static void ldma_dev_drb_cfg(struct ldma_dev *d, int enable)
470 {
471 unsigned long flags;
472 u32 mask = DMA_CTRL_DRB;
473 u32 val = enable ? DMA_CTRL_DRB : 0;
474
475 spin_lock_irqsave(&d->dev_lock, flags);
476 ldma_update_bits(d, mask, val, DMA_CTRL);
477 spin_unlock_irqrestore(&d->dev_lock, flags);
478 }
479
/*
 * Apply the controller-wide configuration derived from the capability
 * flags (d->flags) and instance data.  Called once at device init,
 * after the global reset.  Always returns 0.
 */
static int ldma_dev_cfg(struct ldma_dev *d)
{
	bool enable;

	ldma_dev_pkt_arb_cfg(d, true);
	ldma_dev_global_polling_enable(d);

	enable = !!(d->flags & DMA_DFT_DRB);
	ldma_dev_drb_cfg(d, enable);

	enable = !!(d->flags & DMA_EN_BYTE_EN);
	ldma_dev_byte_enable_cfg(d, enable);

	enable = !!(d->flags & DMA_CHAN_FLOW_CTL);
	ldma_dev_chan_flow_ctl_cfg(d, enable);

	enable = !!(d->flags & DMA_DESC_FOD);
	ldma_dev_desc_fetch_on_demand_cfg(d, enable);

	enable = !!(d->flags & DMA_DESC_IN_SRAM);
	ldma_dev_sram_desc_cfg(d, enable);

	enable = !!(d->flags & DMA_DBURST_WR);
	ldma_dev_dburst_wr_cfg(d, enable);

	enable = !!(d->flags & DMA_VALID_DESC_FETCH_ACK);
	ldma_dev_vld_fetch_ack_cfg(d, enable);

	/* ORRC and descriptor fetch timeout only exist on newer revisions */
	if (d->ver > DMA_VER22) {
		ldma_dev_orrc_cfg(d);
		ldma_dev_df_tout_cfg(d, true, DMA_DFT_DESC_TCNT);
	}

	dev_dbg(d->dev, "%s Controller 0x%08x configuration done\n",
		d->inst->name, readl(d->base + DMA_CTRL));

	return 0;
}
518
/*
 * Write @val to the selected channel's CCTRL register, while latching the
 * hardware-reported direction into c->flags and preserving the hardware's
 * current class field.  Always returns 0.
 *
 * Note: channel registers are windowed — DMA_CS selects which channel the
 * subsequent DMA_C* accesses hit, hence the dev_lock around the sequence.
 */
static int ldma_chan_cctrl_cfg(struct ldma_chan *c, u32 val)
{
	struct ldma_dev *d = to_ldma_dev(c->vchan.chan.device);
	u32 class_low, class_high;
	unsigned long flags;
	u32 reg;

	spin_lock_irqsave(&d->dev_lock, flags);
	ldma_update_bits(d, DMA_CS_MASK, c->nr, DMA_CS);
	reg = readl(d->base + DMA_CCTRL);
	/* Read from hardware */
	if (reg & DMA_CCTRL_DIR_TX)
		c->flags |= DMA_TX_CH;
	else
		c->flags |= DMA_RX_CH;

	/* Keep the class value unchanged */
	class_low = FIELD_GET(DMA_CCTRL_CLASS, reg);
	class_high = FIELD_GET(DMA_CCTRL_CLASSH, reg);
	val &= ~DMA_CCTRL_CLASS;
	val |= FIELD_PREP(DMA_CCTRL_CLASS, class_low);
	val &= ~DMA_CCTRL_CLASSH;
	val |= FIELD_PREP(DMA_CCTRL_CLASSH, class_high);
	writel(val, d->base + DMA_CCTRL);
	spin_unlock_irqrestore(&d->dev_lock, flags);

	return 0;
}
547
/*
 * Put the channel's interrupts into a known state: all channel interrupt
 * sources cleared and disabled, and the channel's bit in the top-level
 * enable register masked.  Channels 0..31 live in DMA_IRNEN/IRNCR,
 * higher channels in DMA_IRNEN1/IRNCR1.
 */
static void ldma_chan_irq_init(struct ldma_chan *c)
{
	struct ldma_dev *d = to_ldma_dev(c->vchan.chan.device);
	unsigned long flags;
	u32 enofs, crofs;
	u32 cn_bit;

	if (c->nr < MAX_LOWER_CHANS) {
		enofs = DMA_IRNEN;
		crofs = DMA_IRNCR;
	} else {
		enofs = DMA_IRNEN1;
		crofs = DMA_IRNCR1;
	}

	cn_bit = BIT(c->nr & MASK_LOWER_CHANS);
	spin_lock_irqsave(&d->dev_lock, flags);
	ldma_update_bits(d, DMA_CS_MASK, c->nr, DMA_CS);

	/* Clear all channel interrupts and disable them */
	writel(0, d->base + DMA_CIE);
	writel(DMA_CI_ALL, d->base + DMA_CIS);

	/* Mask and acknowledge the channel in the top-level registers */
	ldma_update_bits(d, cn_bit, 0, enofs);
	writel(cn_bit, d->base + crofs);
	spin_unlock_irqrestore(&d->dev_lock, flags);
}
575
/*
 * Program the channel's 5-bit arbitration class, split across the CLASS
 * (low 3 bits) and CLASSH (high 2 bits) fields of CCTRL.  Skipped for the
 * memcopy instance and for out-of-range values.  Caller holds dev_lock.
 */
static void ldma_chan_set_class(struct ldma_chan *c, u32 val)
{
	struct ldma_dev *d = to_ldma_dev(c->vchan.chan.device);
	u32 class_val;

	if (d->inst->type == DMA_TYPE_MCPY || val > DMA_MAX_CLASS)
		return;

	/* 3 bits low */
	class_val = FIELD_PREP(DMA_CCTRL_CLASS, val & 0x7);
	/* 2 bits high */
	class_val |= FIELD_PREP(DMA_CCTRL_CLASSH, (val >> 3) & 0x3);

	ldma_update_bits(d, DMA_CS_MASK, c->nr, DMA_CS);
	ldma_update_bits(d, DMA_CCTRL_CLASS | DMA_CCTRL_CLASSH, class_val,
			 DMA_CCTRL);
}
593
/*
 * Turn the channel on.  Returns -EINVAL (with a WARN) if the descriptor
 * base/length were never programmed, 0 otherwise.
 */
static int ldma_chan_on(struct ldma_chan *c)
{
	struct ldma_dev *d = to_ldma_dev(c->vchan.chan.device);
	unsigned long flags;

	/* If descriptors not configured, not allow to turn on channel */
	if (WARN_ON(!c->desc_init))
		return -EINVAL;

	spin_lock_irqsave(&d->dev_lock, flags);
	ldma_update_bits(d, DMA_CS_MASK, c->nr, DMA_CS);
	ldma_update_bits(d, DMA_CCTRL_ON, DMA_CCTRL_ON, DMA_CCTRL);
	spin_unlock_irqrestore(&d->dev_lock, flags);

	c->onoff = DMA_CH_ON;

	return 0;
}
612
/*
 * Turn the channel off and wait (up to 10 ms, atomically) for the
 * hardware to report the ON bit cleared.  Returns 0 on success or the
 * poll-timeout errno.
 */
static int ldma_chan_off(struct ldma_chan *c)
{
	struct ldma_dev *d = to_ldma_dev(c->vchan.chan.device);
	unsigned long flags;
	u32 val;
	int ret;

	spin_lock_irqsave(&d->dev_lock, flags);
	ldma_update_bits(d, DMA_CS_MASK, c->nr, DMA_CS);
	ldma_update_bits(d, DMA_CCTRL_ON, 0, DMA_CCTRL);
	spin_unlock_irqrestore(&d->dev_lock, flags);

	/* Polled outside the lock; DMA_CS still selects this channel */
	ret = readl_poll_timeout_atomic(d->base + DMA_CCTRL, val,
					!(val & DMA_CCTRL_ON), 0, 10000);
	if (ret)
		return ret;

	c->onoff = DMA_CH_OFF;

	return 0;
}
634
/*
 * Program the channel's descriptor list base address and length into
 * hardware, then mark the channel as descriptor-initialized so it may
 * be turned on.
 */
static void ldma_chan_desc_hw_cfg(struct ldma_chan *c, dma_addr_t desc_base,
				  int desc_num)
{
	struct ldma_dev *d = to_ldma_dev(c->vchan.chan.device);
	unsigned long flags;

	spin_lock_irqsave(&d->dev_lock, flags);
	ldma_update_bits(d, DMA_CS_MASK, c->nr, DMA_CS);
	writel(lower_32_bits(desc_base), d->base + DMA_CDBA);

	/* Higher 4 bits of 36 bit addressing */
	if (IS_ENABLED(CONFIG_64BIT)) {
		u32 hi = upper_32_bits(desc_base) & HIGH_4_BITS;

		ldma_update_bits(d, DMA_CDBA_MSB,
				 FIELD_PREP(DMA_CDBA_MSB, hi), DMA_CCTRL);
	}
	writel(desc_num, d->base + DMA_CDLEN);
	spin_unlock_irqrestore(&d->dev_lock, flags);

	c->desc_init = true;
}
657
658 static struct dma_async_tx_descriptor *
ldma_chan_desc_cfg(struct dma_chan * chan,dma_addr_t desc_base,int desc_num)659 ldma_chan_desc_cfg(struct dma_chan *chan, dma_addr_t desc_base, int desc_num)
660 {
661 struct ldma_chan *c = to_ldma_chan(chan);
662 struct ldma_dev *d = to_ldma_dev(c->vchan.chan.device);
663 struct dma_async_tx_descriptor *tx;
664 struct dw2_desc_sw *ds;
665
666 if (!desc_num) {
667 dev_err(d->dev, "Channel %d must allocate descriptor first\n",
668 c->nr);
669 return NULL;
670 }
671
672 if (desc_num > DMA_MAX_DESC_NUM) {
673 dev_err(d->dev, "Channel %d descriptor number out of range %d\n",
674 c->nr, desc_num);
675 return NULL;
676 }
677
678 ldma_chan_desc_hw_cfg(c, desc_base, desc_num);
679
680 c->flags |= DMA_HW_DESC;
681 c->desc_cnt = desc_num;
682 c->desc_phys = desc_base;
683
684 ds = kzalloc_obj(*ds, GFP_NOWAIT);
685 if (!ds)
686 return NULL;
687
688 tx = &ds->vdesc.tx;
689 dma_async_tx_descriptor_init(tx, chan);
690
691 return tx;
692 }
693
/*
 * Stop the channel, assert its reset bit and wait (up to 10 ms) for the
 * hardware to self-clear it.  On success the channel's descriptor state
 * is invalidated (desc_init = false) so it must be reconfigured before
 * being turned on again.  Returns 0 or a negative errno.
 */
static int ldma_chan_reset(struct ldma_chan *c)
{
	struct ldma_dev *d = to_ldma_dev(c->vchan.chan.device);
	unsigned long flags;
	u32 val;
	int ret;

	ret = ldma_chan_off(c);
	if (ret)
		return ret;

	spin_lock_irqsave(&d->dev_lock, flags);
	ldma_update_bits(d, DMA_CS_MASK, c->nr, DMA_CS);
	ldma_update_bits(d, DMA_CCTRL_RST, DMA_CCTRL_RST, DMA_CCTRL);
	spin_unlock_irqrestore(&d->dev_lock, flags);

	ret = readl_poll_timeout_atomic(d->base + DMA_CCTRL, val,
					!(val & DMA_CCTRL_RST), 0, 10000);
	if (ret)
		return ret;

	c->rst = 1;
	c->desc_init = false;

	return 0;
}
720
/*
 * Program the channel byte-offset; a zero or out-of-range length disables
 * the feature.  Caller holds dev_lock (channel window via DMA_CS).
 */
static void ldma_chan_byte_offset_cfg(struct ldma_chan *c, u32 boff_len)
{
	struct ldma_dev *d = to_ldma_dev(c->vchan.chan.device);
	u32 val = 0;

	if (boff_len > 0 && boff_len <= DMA_CHAN_BOFF_MAX)
		val = DMA_C_BOFF_EN | FIELD_PREP(DMA_C_BOFF_BOF_LEN, boff_len);

	ldma_update_bits(d, DMA_CS_MASK, c->nr, DMA_CS);
	ldma_update_bits(d, DMA_C_BOFF_EN | DMA_C_BOFF_BOF_LEN, val,
			 DMA_C_BOFF);
}
735
/* Configure per-channel data endianness conversion. Caller holds dev_lock. */
static void ldma_chan_data_endian_cfg(struct ldma_chan *c, bool enable,
				      u32 endian_type)
{
	struct ldma_dev *d = to_ldma_dev(c->vchan.chan.device);
	u32 val = 0;

	if (enable)
		val = DMA_C_END_DE_EN |
		      FIELD_PREP(DMA_C_END_DATAENDI, endian_type);

	ldma_update_bits(d, DMA_CS_MASK, c->nr, DMA_CS);
	ldma_update_bits(d, DMA_C_END_DE_EN | DMA_C_END_DATAENDI, val,
			 DMA_C_ENDIAN);
}
751
/* Configure per-channel descriptor endianness conversion. Caller holds dev_lock. */
static void ldma_chan_desc_endian_cfg(struct ldma_chan *c, bool enable,
				      u32 endian_type)
{
	struct ldma_dev *d = to_ldma_dev(c->vchan.chan.device);
	u32 val = 0;

	if (enable)
		val = DMA_C_END_DES_EN |
		      FIELD_PREP(DMA_C_END_DESENDI, endian_type);

	ldma_update_bits(d, DMA_CS_MASK, c->nr, DMA_CS);
	ldma_update_bits(d, DMA_C_END_DES_EN | DMA_C_END_DESENDI, val,
			 DMA_C_ENDIAN);
}
767
/*
 * Configure header mode for the channel (see the DMA_C_HDRM comment
 * above): with @csum the checksum-with-header bit is set and the length
 * field is irrelevant; without it, @hdr_len is programmed and the
 * checksum bit cleared.
 *
 * Fix: the length bits must be part of the update mask.  Previously the
 * mask was DMA_C_HDRM_HDR_SUM alone, and since ldma_update_bits() applies
 * "val & mask", a plain hdr_len value was masked to zero and the header
 * length was never actually written to DMA_C_HDRM.
 */
static void ldma_chan_hdr_mode_cfg(struct ldma_chan *c, u32 hdr_len, bool csum)
{
	struct ldma_dev *d = to_ldma_dev(c->vchan.chan.device);
	u32 mask, val;

	/* NB, csum disabled, hdr length must be provided */
	if (!csum && (!hdr_len || hdr_len > DMA_HDR_LEN_MAX))
		return;

	if (!csum && hdr_len) {
		/* DMA_HDR_LEN_MAX covers the HDR_LEN field bits */
		mask = DMA_C_HDRM_HDR_SUM | DMA_HDR_LEN_MAX;
		val = hdr_len;
	} else {
		mask = DMA_C_HDRM_HDR_SUM;
		val = DMA_C_HDRM_HDR_SUM;
	}

	ldma_update_bits(d, DMA_CS_MASK, c->nr, DMA_CS);
	ldma_update_bits(d, mask, val, DMA_C_HDRM);
}
786
/* Non-posted write setting; RX channels only. Caller holds dev_lock. */
static void ldma_chan_rxwr_np_cfg(struct ldma_chan *c, bool enable)
{
	struct ldma_dev *d = to_ldma_dev(c->vchan.chan.device);

	/* Only valid for RX channel */
	if (ldma_chan_tx(c))
		return;

	ldma_update_bits(d, DMA_CS_MASK, c->nr, DMA_CS);
	ldma_update_bits(d, DMA_CCTRL_WR_NP_EN,
			 enable ? DMA_CCTRL_WR_NP_EN : 0, DMA_CCTRL);
}
802
/* Adaptive Burst Chop: v3.2+ RX channels only. Caller holds dev_lock. */
static void ldma_chan_abc_cfg(struct ldma_chan *c, bool enable)
{
	struct ldma_dev *d = to_ldma_dev(c->vchan.chan.device);

	if (d->ver < DMA_VER32 || ldma_chan_tx(c))
		return;

	ldma_update_bits(d, DMA_CS_MASK, c->nr, DMA_CS);
	ldma_update_bits(d, DMA_CCTRL_CH_ABC,
			 enable ? DMA_CCTRL_CH_ABC : 0, DMA_CCTRL);
}
817
/*
 * Program one port's control register (endianness, burst lengths and —
 * on newer revisions — packet drop).  Port registers are windowed via
 * DMA_PS, hence the dev_lock around the select/write pair.  Always
 * returns 0.
 *
 * Burst encoding differs by revision: v2.2 uses the 2-bit TXBL/RXBL
 * fields directly, newer versions use dedicated 16/32-DW bits with the
 * 8-DW encoding as fallback.
 */
static int ldma_port_cfg(struct ldma_port *p)
{
	unsigned long flags;
	struct ldma_dev *d;
	u32 reg;

	d = p->ldev;
	reg = FIELD_PREP(DMA_PCTRL_TXENDI, p->txendi);
	reg |= FIELD_PREP(DMA_PCTRL_RXENDI, p->rxendi);

	if (d->ver == DMA_VER22) {
		reg |= FIELD_PREP(DMA_PCTRL_TXBL, p->txbl);
		reg |= FIELD_PREP(DMA_PCTRL_RXBL, p->rxbl);
	} else {
		reg |= FIELD_PREP(DMA_PCTRL_PDEN, p->pkt_drop);

		if (p->txbl == DMA_BURSTL_32DW)
			reg |= DMA_PCTRL_TXBL32;
		else if (p->txbl == DMA_BURSTL_16DW)
			reg |= DMA_PCTRL_TXBL16;
		else
			reg |= FIELD_PREP(DMA_PCTRL_TXBL, DMA_PCTRL_TXBL_8);

		if (p->rxbl == DMA_BURSTL_32DW)
			reg |= DMA_PCTRL_RXBL32;
		else if (p->rxbl == DMA_BURSTL_16DW)
			reg |= DMA_PCTRL_RXBL16;
		else
			reg |= FIELD_PREP(DMA_PCTRL_RXBL, DMA_PCTRL_RXBL_8);
	}

	spin_lock_irqsave(&d->dev_lock, flags);
	writel(p->portid, d->base + DMA_PS);
	writel(reg, d->base + DMA_PCTRL);
	spin_unlock_irqrestore(&d->dev_lock, flags);

	reg = readl(d->base + DMA_PCTRL); /* read back */
	dev_dbg(d->dev, "Port Control 0x%08x configuration done\n", reg);

	return 0;
}
859
/*
 * Full per-channel configuration: CCTRL flags and IRQ init for all
 * revisions, plus the v3.x-only per-channel features (class, byte
 * offset, endianness, header mode, non-posted writes, ABC).  The v3.x
 * helpers expect dev_lock held, since they share the DMA_CS channel
 * window.  Always returns 0.
 */
static int ldma_chan_cfg(struct ldma_chan *c)
{
	struct ldma_dev *d = to_ldma_dev(c->vchan.chan.device);
	unsigned long flags;
	u32 reg;

	reg = c->pden ? DMA_CCTRL_PDEN : 0;
	reg |= c->onoff ? DMA_CCTRL_ON : 0;
	reg |= c->rst ? DMA_CCTRL_RST : 0;

	ldma_chan_cctrl_cfg(c, reg);
	ldma_chan_irq_init(c);

	if (d->ver <= DMA_VER22)
		return 0;

	spin_lock_irqsave(&d->dev_lock, flags);
	ldma_chan_set_class(c, c->nr);
	ldma_chan_byte_offset_cfg(c, c->boff_len);
	ldma_chan_data_endian_cfg(c, c->data_endian_en, c->data_endian);
	ldma_chan_desc_endian_cfg(c, c->desc_endian_en, c->desc_endian);
	ldma_chan_hdr_mode_cfg(c, c->hdrm_len, c->hdrm_csum);
	ldma_chan_rxwr_np_cfg(c, c->desc_rx_np);
	ldma_chan_abc_cfg(c, c->abc_en);
	spin_unlock_irqrestore(&d->dev_lock, flags);

	/* Re-apply client-provided descriptor config after (re)configuration */
	if (ldma_chan_is_hw_desc(c))
		ldma_chan_desc_hw_cfg(c, c->desc_phys, c->desc_cnt);

	return 0;
}
891
/*
 * One-shot controller bring-up: reset, global config, then every port
 * and every channel enabled in channels_mask.
 */
static void ldma_dev_init(struct ldma_dev *d)
{
	unsigned long ch_mask = (unsigned long)d->channels_mask;
	struct ldma_port *p;
	struct ldma_chan *c;
	int i;
	u32 j;

	spin_lock_init(&d->dev_lock);
	ldma_dev_reset(d);
	ldma_dev_cfg(d);

	/* DMA port initialization */
	for (i = 0; i < d->port_nrs; i++) {
		p = &d->ports[i];
		ldma_port_cfg(p);
	}

	/* DMA channel initialization */
	for_each_set_bit(j, &ch_mask, d->chan_nrs) {
		c = &d->chans[j];
		ldma_chan_cfg(c);
	}
}
916
/*
 * Gather configuration from firmware properties and instance data into
 * d->flags / d->pollcnt, and seed default port settings on newer
 * revisions.  Returns 0, or -EINVAL if a >v2.2 device has no ports.
 */
static int ldma_parse_dt(struct ldma_dev *d)
{
	struct fwnode_handle *fwnode = dev_fwnode(d->dev);
	struct ldma_port *p;
	int i;

	if (fwnode_property_read_bool(fwnode, "intel,dma-byte-en"))
		d->flags |= DMA_EN_BYTE_EN;

	if (fwnode_property_read_bool(fwnode, "intel,dma-dburst-wr"))
		d->flags |= DMA_DBURST_WR;

	if (fwnode_property_read_bool(fwnode, "intel,dma-drb"))
		d->flags |= DMA_DFT_DRB;

	/* Fall back to the default poll counter if the property is absent */
	if (fwnode_property_read_u32(fwnode, "intel,dma-poll-cnt",
				     &d->pollcnt))
		d->pollcnt = DMA_DFT_POLL_CNT;

	if (d->inst->chan_fc)
		d->flags |= DMA_CHAN_FLOW_CTL;

	if (d->inst->desc_fod)
		d->flags |= DMA_DESC_FOD;

	if (d->inst->desc_in_sram)
		d->flags |= DMA_DESC_IN_SRAM;

	if (d->inst->valid_desc_fetch_ack)
		d->flags |= DMA_VALID_DESC_FETCH_ACK;

	if (d->ver > DMA_VER22) {
		if (!d->port_nrs)
			return -EINVAL;

		/* Default port settings; no firmware overrides here */
		for (i = 0; i < d->port_nrs; i++) {
			p = &d->ports[i];
			p->rxendi = DMA_DFT_ENDIAN;
			p->txendi = DMA_DFT_ENDIAN;
			p->rxbl = DMA_DFT_BURST;
			p->txbl = DMA_DFT_BURST;
			p->pkt_drop = DMA_PKT_DROP_DIS;
		}
	}

	return 0;
}
964
/*
 * virt-dma desc_free callback: return the hardware descriptors to the
 * channel's DMA pool and free the software wrapper.
 */
static void dma_free_desc_resource(struct virt_dma_desc *vdesc)
{
	struct dw2_desc_sw *ds = to_lgm_dma_desc(vdesc);
	struct ldma_chan *c = ds->chan;

	dma_pool_free(c->desc_pool, ds->desc_hw, ds->desc_phys);
	kfree(ds);
}
973
/*
 * Allocate a software descriptor plus its hardware descriptors from the
 * channel pool.  @num must not exceed the channel's configured maximum.
 * Returns NULL on failure; freed by dma_free_desc_resource().
 */
static struct dw2_desc_sw *
dma_alloc_desc_resource(int num, struct ldma_chan *c)
{
	struct device *dev = c->vchan.chan.device->dev;
	struct dw2_desc_sw *ds;

	if (num > c->desc_num) {
		dev_err(dev, "sg num %d exceed max %d\n", num, c->desc_num);
		return NULL;
	}

	ds = kzalloc_obj(*ds, GFP_NOWAIT);
	if (!ds)
		return NULL;

	ds->chan = c;
	/* One pool entry holds the whole descriptor run for this transfer */
	ds->desc_hw = dma_pool_zalloc(c->desc_pool, GFP_ATOMIC,
				      &ds->desc_phys);
	if (!ds->desc_hw) {
		dev_dbg(dev, "out of memory for link descriptor\n");
		kfree(ds);
		return NULL;
	}
	ds->desc_cnt = num;

	return ds;
}
1001
/*
 * Enable the end-of-packet interrupt for this channel and unmask it in
 * the top-level enable register.
 *
 * NOTE(review): this writes BIT(c->nr) into DMA_IRNEN unconditionally,
 * which presumably assumes c->nr < 32 on the (v2.2) path that uses it —
 * confirm against ldma_chan_irq_init(), which handles DMA_IRNEN1.
 */
static void ldma_chan_irq_en(struct ldma_chan *c)
{
	struct ldma_dev *d = to_ldma_dev(c->vchan.chan.device);
	unsigned long flags;

	spin_lock_irqsave(&d->dev_lock, flags);
	writel(c->nr, d->base + DMA_CS);
	writel(DMA_CI_EOP, d->base + DMA_CIE);
	writel(BIT(c->nr), d->base + DMA_IRNEN);
	spin_unlock_irqrestore(&d->dev_lock, flags);
}
1013
/*
 * dmaengine issue_pending callback.  On v2.2 the driver feeds the
 * hardware one virt-dma descriptor at a time: pop the next descriptor,
 * program its hardware descriptors and enable the EOP interrupt.  On
 * newer revisions the client manages descriptors, so only the channel
 * needs turning on.
 */
static void ldma_issue_pending(struct dma_chan *chan)
{
	struct ldma_chan *c = to_ldma_chan(chan);
	struct ldma_dev *d = to_ldma_dev(c->vchan.chan.device);
	unsigned long flags;

	if (d->ver == DMA_VER22) {
		spin_lock_irqsave(&c->vchan.lock, flags);
		if (vchan_issue_pending(&c->vchan)) {
			struct virt_dma_desc *vdesc;

			/* Get the next descriptor */
			vdesc = vchan_next_desc(&c->vchan);
			if (!vdesc) {
				c->ds = NULL;
				spin_unlock_irqrestore(&c->vchan.lock, flags);
				return;
			}
			/* Take ownership: the vchan list no longer tracks it */
			list_del(&vdesc->node);
			c->ds = to_lgm_dma_desc(vdesc);
			ldma_chan_desc_hw_cfg(c, c->ds->desc_phys, c->ds->desc_cnt);
			ldma_chan_irq_en(c);
		}
		spin_unlock_irqrestore(&c->vchan.lock, flags);
	}
	ldma_chan_on(c);
}
1041
ldma_synchronize(struct dma_chan * chan)1042 static void ldma_synchronize(struct dma_chan *chan)
1043 {
1044 struct ldma_chan *c = to_ldma_chan(chan);
1045
1046 /*
1047 * clear any pending work if any. In that
1048 * case the resource needs to be free here.
1049 */
1050 cancel_work_sync(&c->work);
1051 vchan_synchronize(&c->vchan);
1052 if (c->ds)
1053 dma_free_desc_resource(&c->ds->vdesc);
1054 }
1055
/*
 * dmaengine terminate_all callback: free every queued virt-dma
 * descriptor and hard-reset the channel.  Returns the reset result.
 */
static int ldma_terminate_all(struct dma_chan *chan)
{
	struct ldma_chan *c = to_ldma_chan(chan);
	unsigned long flags;
	LIST_HEAD(head);

	spin_lock_irqsave(&c->vchan.lock, flags);
	vchan_get_all_descriptors(&c->vchan, &head);
	spin_unlock_irqrestore(&c->vchan.lock, flags);
	/* Free outside the lock; descriptors are already off the lists */
	vchan_dma_desc_free_list(&c->vchan, &head);

	return ldma_chan_reset(c);
}
1069
/*
 * dmaengine device_resume callback: turn the channel back on.
 *
 * Fix: propagate the result of ldma_chan_on() — it fails with -EINVAL
 * when the channel's descriptors were never configured, and that error
 * was previously swallowed (always returning 0).
 */
static int ldma_resume_chan(struct dma_chan *chan)
{
	struct ldma_chan *c = to_ldma_chan(chan);

	return ldma_chan_on(c);
}
1078
/* dmaengine device_pause callback: turn the channel off. */
static int ldma_pause_chan(struct dma_chan *chan)
{
	return ldma_chan_off(to_ldma_chan(chan));
}
1085
1086 static enum dma_status
ldma_tx_status(struct dma_chan * chan,dma_cookie_t cookie,struct dma_tx_state * txstate)1087 ldma_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
1088 struct dma_tx_state *txstate)
1089 {
1090 struct ldma_chan *c = to_ldma_chan(chan);
1091 struct ldma_dev *d = to_ldma_dev(c->vchan.chan.device);
1092 enum dma_status status = DMA_COMPLETE;
1093
1094 if (d->ver == DMA_VER22)
1095 status = dma_cookie_status(chan, cookie, txstate);
1096
1097 return status;
1098 }
1099
/*
 * Per-channel interrupt service: read and acknowledge the channel's
 * interrupt status, mask further channel interrupts, and defer the
 * completion handling to the ordered workqueue.
 */
static void dma_chan_irq(int irq, void *data)
{
	struct ldma_chan *c = data;
	struct ldma_dev *d = to_ldma_dev(c->vchan.chan.device);
	u32 stat;

	/* Disable channel interrupts */
	/*
	 * DMA_CS selects the channel register window; the CIS/CIE accesses
	 * below operate on the channel selected here, so the order of these
	 * register writes must not change.
	 */
	writel(c->nr, d->base + DMA_CS);
	stat = readl(d->base + DMA_CIS);
	if (!stat)
		return;

	/* Mask all causes for this channel, then ack the pending ones. */
	writel(readl(d->base + DMA_CIE) & ~DMA_CI_ALL, d->base + DMA_CIE);
	writel(stat, d->base + DMA_CIS);
	queue_work(d->wq, &c->work);
}
1116
/*
 * Top-level interrupt handler (v2.2 engine): walk the pending-channel
 * bitmap, mask and acknowledge each channel's line, then dispatch to the
 * per-channel handler.
 */
static irqreturn_t dma_interrupt(int irq, void *dev_id)
{
	struct ldma_dev *d = dev_id;
	struct ldma_chan *c;
	unsigned long irncr;
	u32 cid;

	irncr = readl(d->base + DMA_IRNCR);
	if (!irncr) {
		/* Spurious: no channel claims the interrupt. */
		dev_err(d->dev, "dummy interrupt\n");
		return IRQ_NONE;
	}

	for_each_set_bit(cid, &irncr, d->chan_nrs) {
		/* Mask */
		writel(readl(d->base + DMA_IRNEN) & ~BIT(cid), d->base + DMA_IRNEN);
		/* Ack */
		writel(readl(d->base + DMA_IRNCR) | BIT(cid), d->base + DMA_IRNCR);

		c = &d->chans[cid];
		dma_chan_irq(irq, c);
	}

	return IRQ_HANDLED;
}
1142
prep_slave_burst_len(struct ldma_chan * c)1143 static void prep_slave_burst_len(struct ldma_chan *c)
1144 {
1145 struct ldma_port *p = c->port;
1146 struct dma_slave_config *cfg = &c->config;
1147
1148 if (cfg->dst_maxburst)
1149 cfg->src_maxburst = cfg->dst_maxburst;
1150
1151 /* TX and RX has the same burst length */
1152 p->txbl = ilog2(cfg->src_maxburst);
1153 p->rxbl = p->txbl;
1154 }
1155
/*
 * Build a slave transfer from a scatterlist.
 *
 * For engines newer than v2.2 the descriptor list lives in hardware and is
 * configured directly from the first entry. For v2.2 a software descriptor
 * ring is filled here: each sg entry is split into DMA_MAX_SIZE chunks,
 * SOP/EOP markers are placed on the first/last descriptor, and ownership is
 * handed to hardware with explicit write barriers.
 *
 * Returns the prepared async tx descriptor, or NULL on bad input or
 * allocation failure.
 */
static struct dma_async_tx_descriptor *
ldma_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
		   unsigned int sglen, enum dma_transfer_direction dir,
		   unsigned long flags, void *context)
{
	struct ldma_chan *c = to_ldma_chan(chan);
	struct ldma_dev *d = to_ldma_dev(c->vchan.chan.device);
	size_t len, avail, total = 0;
	struct dw2_desc *hw_ds;
	struct dw2_desc_sw *ds;
	struct scatterlist *sg;
	dma_addr_t addr;
	int num, i;

	if (!sgl)
		return NULL;

	/* Hardware-managed descriptors on newer engines. */
	if (d->ver > DMA_VER22)
		return ldma_chan_desc_cfg(chan, sgl->dma_address, sglen);

	/* Worst-case descriptor count after splitting at DMA_MAX_SIZE. */
	num = sg_nents_for_dma(sgl, sglen, DMA_MAX_SIZE);
	ds = dma_alloc_desc_resource(num, c);
	if (!ds)
		return NULL;

	c->ds = ds;

	num = 0;
	/* sop and eop has to be handled nicely */
	for_each_sg(sgl, sg, sglen, i) {
		addr = sg_dma_address(sg);
		avail = sg_dma_len(sg);
		total += avail;

		do {
			len = min_t(size_t, avail, DMA_MAX_SIZE);

			hw_ds = &ds->desc_hw[num];
			switch (sglen) {
			case 1:
				/* Single entry: descriptor is both SOP and EOP. */
				hw_ds->field &= ~DESC_SOP;
				hw_ds->field |= FIELD_PREP(DESC_SOP, 1);

				hw_ds->field &= ~DESC_EOP;
				hw_ds->field |= FIELD_PREP(DESC_EOP, 1);
				break;
			default:
				/*
				 * NOTE(review): 'num' counts emitted descriptors,
				 * not sg entries, so if an entry is split at
				 * DMA_MAX_SIZE the 'num == (sglen - 1)' EOP test
				 * may tag the wrong descriptor — TODO confirm
				 * against hardware expectations.
				 */
				if (num == 0) {
					hw_ds->field &= ~DESC_SOP;
					hw_ds->field |= FIELD_PREP(DESC_SOP, 1);

					hw_ds->field &= ~DESC_EOP;
					hw_ds->field |= FIELD_PREP(DESC_EOP, 0);
				} else if (num == (sglen - 1)) {
					hw_ds->field &= ~DESC_SOP;
					hw_ds->field |= FIELD_PREP(DESC_SOP, 0);
					hw_ds->field &= ~DESC_EOP;
					hw_ds->field |= FIELD_PREP(DESC_EOP, 1);
				} else {
					hw_ds->field &= ~DESC_SOP;
					hw_ds->field |= FIELD_PREP(DESC_SOP, 0);

					hw_ds->field &= ~DESC_EOP;
					hw_ds->field |= FIELD_PREP(DESC_EOP, 0);
				}
				break;
			}
			/* Only 32 bit address supported */
			hw_ds->addr = (u32)addr;

			hw_ds->field &= ~DESC_DATA_LEN;
			hw_ds->field |= FIELD_PREP(DESC_DATA_LEN, len);

			hw_ds->field &= ~DESC_C;
			hw_ds->field |= FIELD_PREP(DESC_C, 0);

			hw_ds->field &= ~DESC_BYTE_OFF;
			hw_ds->field |= FIELD_PREP(DESC_BYTE_OFF, addr & 0x3);

			/* Ensure data ready before ownership change */
			wmb();
			hw_ds->field &= ~DESC_OWN;
			hw_ds->field |= FIELD_PREP(DESC_OWN, DMA_OWN);

			/* Ensure ownership changed before moving forward */
			wmb();
			num++;
			addr += len;
			avail -= len;
		} while (avail);
	}

	ds->size = total;
	prep_slave_burst_len(c);

	return vchan_tx_prep(&c->vchan, &ds->vdesc, DMA_CTRL_ACK);
}
1253
1254 static int
ldma_slave_config(struct dma_chan * chan,struct dma_slave_config * cfg)1255 ldma_slave_config(struct dma_chan *chan, struct dma_slave_config *cfg)
1256 {
1257 struct ldma_chan *c = to_ldma_chan(chan);
1258
1259 memcpy(&c->config, cfg, sizeof(c->config));
1260
1261 return 0;
1262 }
1263
ldma_alloc_chan_resources(struct dma_chan * chan)1264 static int ldma_alloc_chan_resources(struct dma_chan *chan)
1265 {
1266 struct ldma_chan *c = to_ldma_chan(chan);
1267 struct ldma_dev *d = to_ldma_dev(c->vchan.chan.device);
1268 struct device *dev = c->vchan.chan.device->dev;
1269 size_t desc_sz;
1270
1271 if (d->ver > DMA_VER22) {
1272 c->flags |= CHAN_IN_USE;
1273 return 0;
1274 }
1275
1276 if (c->desc_pool)
1277 return c->desc_num;
1278
1279 desc_sz = c->desc_num * sizeof(struct dw2_desc);
1280 c->desc_pool = dma_pool_create(c->name, dev, desc_sz,
1281 __alignof__(struct dw2_desc), 0);
1282
1283 if (!c->desc_pool) {
1284 dev_err(dev, "unable to allocate descriptor pool\n");
1285 return -ENOMEM;
1286 }
1287
1288 return c->desc_num;
1289 }
1290
ldma_free_chan_resources(struct dma_chan * chan)1291 static void ldma_free_chan_resources(struct dma_chan *chan)
1292 {
1293 struct ldma_chan *c = to_ldma_chan(chan);
1294 struct ldma_dev *d = to_ldma_dev(c->vchan.chan.device);
1295
1296 if (d->ver == DMA_VER22) {
1297 dma_pool_destroy(c->desc_pool);
1298 c->desc_pool = NULL;
1299 vchan_free_chan_resources(to_virt_chan(chan));
1300 ldma_chan_reset(c);
1301 } else {
1302 c->flags &= ~CHAN_IN_USE;
1303 }
1304 }
1305
/*
 * Deferred completion handler, queued from dma_chan_irq(): completes the
 * channel's current descriptor and anything parked on the vchan completed
 * list, invoking callbacks in process context.
 */
static void dma_work(struct work_struct *work)
{
	struct ldma_chan *c = container_of(work, struct ldma_chan, work);
	struct dma_async_tx_descriptor *tx = &c->ds->vdesc.tx;
	struct virt_dma_chan *vc = &c->vchan;
	struct dmaengine_desc_callback cb;
	struct virt_dma_desc *vd, *_vd;
	unsigned long flags;
	LIST_HEAD(head);

	/* Detach the completed descriptors under the channel lock. */
	spin_lock_irqsave(&c->vchan.lock, flags);
	list_splice_tail_init(&vc->desc_completed, &head);
	spin_unlock_irqrestore(&c->vchan.lock, flags);
	dmaengine_desc_get_callback(tx, &cb);
	dma_cookie_complete(tx);
	dmaengine_desc_callback_invoke(&cb, NULL);

	list_for_each_entry_safe(vd, _vd, &head, node) {
		/*
		 * NOTE(review): the callback/cookie of 'tx' (the current
		 * descriptor) is reused for every entry rather than vd's own
		 * — presumably fine for this single-outstanding-descriptor
		 * engine, but verify before relying on it.
		 */
		dmaengine_desc_get_callback(tx, &cb);
		dma_cookie_complete(tx);
		list_del(&vd->node);
		dmaengine_desc_callback_invoke(&cb, NULL);

		vchan_vdesc_fini(vd);
	}
	/* Current descriptor fully handled. */
	c->ds = NULL;
}
1333
1334 static void
update_burst_len_v22(struct ldma_chan * c,struct ldma_port * p,u32 burst)1335 update_burst_len_v22(struct ldma_chan *c, struct ldma_port *p, u32 burst)
1336 {
1337 if (ldma_chan_tx(c))
1338 p->txbl = ilog2(burst);
1339 else
1340 p->rxbl = ilog2(burst);
1341 }
1342
1343 static void
update_burst_len_v3X(struct ldma_chan * c,struct ldma_port * p,u32 burst)1344 update_burst_len_v3X(struct ldma_chan *c, struct ldma_port *p, u32 burst)
1345 {
1346 if (ldma_chan_tx(c))
1347 p->txbl = burst;
1348 else
1349 p->rxbl = burst;
1350 }
1351
1352 static int
update_client_configs(struct of_dma * ofdma,struct of_phandle_args * spec)1353 update_client_configs(struct of_dma *ofdma, struct of_phandle_args *spec)
1354 {
1355 struct ldma_dev *d = ofdma->of_dma_data;
1356 u32 chan_id = spec->args[0];
1357 u32 port_id = spec->args[1];
1358 u32 burst = spec->args[2];
1359 struct ldma_port *p;
1360 struct ldma_chan *c;
1361
1362 if (chan_id >= d->chan_nrs || port_id >= d->port_nrs)
1363 return 0;
1364
1365 p = &d->ports[port_id];
1366 c = &d->chans[chan_id];
1367 c->port = p;
1368
1369 if (d->ver == DMA_VER22)
1370 update_burst_len_v22(c, p, burst);
1371 else
1372 update_burst_len_v3X(c, p, burst);
1373
1374 ldma_port_cfg(p);
1375
1376 return 1;
1377 }
1378
ldma_xlate(struct of_phandle_args * spec,struct of_dma * ofdma)1379 static struct dma_chan *ldma_xlate(struct of_phandle_args *spec,
1380 struct of_dma *ofdma)
1381 {
1382 struct ldma_dev *d = ofdma->of_dma_data;
1383 u32 chan_id = spec->args[0];
1384 int ret;
1385
1386 if (!spec->args_count)
1387 return NULL;
1388
1389 /* if args_count is 1 driver use default settings */
1390 if (spec->args_count > 1) {
1391 ret = update_client_configs(ofdma, spec);
1392 if (!ret)
1393 return NULL;
1394 }
1395
1396 return dma_get_slave_channel(&d->chans[chan_id].vchan.chan);
1397 }
1398
ldma_dma_init_v22(int i,struct ldma_dev * d)1399 static void ldma_dma_init_v22(int i, struct ldma_dev *d)
1400 {
1401 struct ldma_chan *c;
1402
1403 c = &d->chans[i];
1404 c->nr = i; /* Real channel number */
1405 c->rst = DMA_CHAN_RST;
1406 c->desc_num = DMA_DFT_DESC_NUM;
1407 snprintf(c->name, sizeof(c->name), "chan%d", c->nr);
1408 INIT_WORK(&c->work, dma_work);
1409 c->vchan.desc_free = dma_free_desc_resource;
1410 vchan_init(&c->vchan, &d->dma_dev);
1411 }
1412
ldma_dma_init_v3X(int i,struct ldma_dev * d)1413 static void ldma_dma_init_v3X(int i, struct ldma_dev *d)
1414 {
1415 struct ldma_chan *c;
1416
1417 c = &d->chans[i];
1418 c->data_endian = DMA_DFT_ENDIAN;
1419 c->desc_endian = DMA_DFT_ENDIAN;
1420 c->data_endian_en = false;
1421 c->desc_endian_en = false;
1422 c->desc_rx_np = false;
1423 c->flags |= DEVICE_ALLOC_DESC;
1424 c->onoff = DMA_CH_OFF;
1425 c->rst = DMA_CHAN_RST;
1426 c->abc_en = true;
1427 c->hdrm_csum = false;
1428 c->boff_len = 0;
1429 c->nr = i;
1430 c->vchan.desc_free = dma_free_desc_resource;
1431 vchan_init(&c->vchan, &d->dma_dev);
1432 }
1433
ldma_init_v22(struct ldma_dev * d,struct platform_device * pdev)1434 static int ldma_init_v22(struct ldma_dev *d, struct platform_device *pdev)
1435 {
1436 int ret;
1437
1438 ret = device_property_read_u32(d->dev, "dma-channels", &d->chan_nrs);
1439 if (ret < 0) {
1440 dev_err(d->dev, "unable to read dma-channels property\n");
1441 return ret;
1442 }
1443
1444 d->irq = platform_get_irq(pdev, 0);
1445 if (d->irq < 0)
1446 return d->irq;
1447
1448 ret = devm_request_irq(&pdev->dev, d->irq, dma_interrupt, 0,
1449 DRIVER_NAME, d);
1450 if (ret)
1451 return ret;
1452
1453 d->wq = alloc_ordered_workqueue("dma_wq", WQ_MEM_RECLAIM |
1454 WQ_HIGHPRI);
1455 if (!d->wq)
1456 return -ENOMEM;
1457
1458 return 0;
1459 }
1460
/*
 * devm teardown action registered by intel_ldma_probe(): undo the power-up
 * sequence — gate the core clock, then assert reset. Order matters.
 */
static void ldma_clk_disable(void *data)
{
	struct ldma_dev *d = data;

	clk_disable_unprepare(d->core_clk);
	reset_control_assert(d->rst);
}
1468
/*
 * Per-instance capability data, selected via the OF match table below.
 * Fields: type (TX/RX/memcpy), outstanding-read count (orrc), channel flow
 * control, fetch-on-demand descriptors, descriptors in SRAM, and whether
 * the engine acknowledges valid descriptor fetches.
 */
static const struct ldma_inst_data dma0 = {
	.name = "dma0",
	.chan_fc = false,
	.desc_fod = false,
	.desc_in_sram = false,
	.valid_desc_fetch_ack = false,
};

static const struct ldma_inst_data dma2tx = {
	.name = "dma2tx",
	.type = DMA_TYPE_TX,
	.orrc = 16,
	.chan_fc = true,
	.desc_fod = true,
	.desc_in_sram = true,
	.valid_desc_fetch_ack = true,
};

static const struct ldma_inst_data dma1rx = {
	.name = "dma1rx",
	.type = DMA_TYPE_RX,
	.orrc = 16,
	.chan_fc = false,
	.desc_fod = true,
	.desc_in_sram = true,
	.valid_desc_fetch_ack = false,
};

static const struct ldma_inst_data dma1tx = {
	.name = "dma1tx",
	.type = DMA_TYPE_TX,
	.orrc = 16,
	.chan_fc = true,
	.desc_fod = true,
	.desc_in_sram = true,
	.valid_desc_fetch_ack = true,
};

static const struct ldma_inst_data dma0tx = {
	.name = "dma0tx",
	.type = DMA_TYPE_TX,
	.orrc = 16,
	.chan_fc = true,
	.desc_fod = true,
	.desc_in_sram = true,
	.valid_desc_fetch_ack = true,
};

static const struct ldma_inst_data dma3 = {
	.name = "dma3",
	.type = DMA_TYPE_MCPY,
	.orrc = 16,
	.chan_fc = false,
	.desc_fod = false,
	.desc_in_sram = true,
	.valid_desc_fetch_ack = false,
};

static const struct ldma_inst_data toe_dma30 = {
	.name = "toe_dma30",
	.type = DMA_TYPE_MCPY,
	.orrc = 16,
	.chan_fc = false,
	.desc_fod = false,
	.desc_in_sram = true,
	.valid_desc_fetch_ack = true,
};

static const struct ldma_inst_data toe_dma31 = {
	.name = "toe_dma31",
	.type = DMA_TYPE_MCPY,
	.orrc = 16,
	.chan_fc = false,
	.desc_fod = false,
	.desc_in_sram = true,
	.valid_desc_fetch_ack = true,
};

/* One compatible per DMA instance on the SoC. */
static const struct of_device_id intel_ldma_match[] = {
	{ .compatible = "intel,lgm-cdma", .data = &dma0},
	{ .compatible = "intel,lgm-dma2tx", .data = &dma2tx},
	{ .compatible = "intel,lgm-dma1rx", .data = &dma1rx},
	{ .compatible = "intel,lgm-dma1tx", .data = &dma1tx},
	{ .compatible = "intel,lgm-dma0tx", .data = &dma0tx},
	{ .compatible = "intel,lgm-dma3", .data = &dma3},
	{ .compatible = "intel,lgm-toe-dma30", .data = &toe_dma30},
	{ .compatible = "intel,lgm-toe-dma31", .data = &toe_dma31},
	{}
};
1558
intel_ldma_probe(struct platform_device * pdev)1559 static int intel_ldma_probe(struct platform_device *pdev)
1560 {
1561 struct device *dev = &pdev->dev;
1562 struct dma_device *dma_dev;
1563 unsigned long ch_mask;
1564 struct ldma_chan *c;
1565 struct ldma_port *p;
1566 struct ldma_dev *d;
1567 u32 id, bitn = 32, j;
1568 int i, ret;
1569
1570 d = devm_kzalloc(dev, sizeof(*d), GFP_KERNEL);
1571 if (!d)
1572 return -ENOMEM;
1573
1574 /* Link controller to platform device */
1575 d->dev = &pdev->dev;
1576
1577 d->inst = device_get_match_data(dev);
1578 if (!d->inst) {
1579 dev_err(dev, "No device match found\n");
1580 return -ENODEV;
1581 }
1582
1583 d->base = devm_platform_ioremap_resource(pdev, 0);
1584 if (IS_ERR(d->base))
1585 return PTR_ERR(d->base);
1586
1587 /* Power up and reset the dma engine, some DMAs always on?? */
1588 d->core_clk = devm_clk_get_optional(dev, NULL);
1589 if (IS_ERR(d->core_clk))
1590 return PTR_ERR(d->core_clk);
1591
1592 d->rst = devm_reset_control_get_optional(dev, NULL);
1593 if (IS_ERR(d->rst))
1594 return PTR_ERR(d->rst);
1595
1596 clk_prepare_enable(d->core_clk);
1597 reset_control_deassert(d->rst);
1598
1599 ret = devm_add_action_or_reset(dev, ldma_clk_disable, d);
1600 if (ret) {
1601 dev_err(dev, "Failed to devm_add_action_or_reset, %d\n", ret);
1602 return ret;
1603 }
1604
1605 id = readl(d->base + DMA_ID);
1606 d->chan_nrs = FIELD_GET(DMA_ID_CHNR, id);
1607 d->port_nrs = FIELD_GET(DMA_ID_PNR, id);
1608 d->ver = FIELD_GET(DMA_ID_REV, id);
1609
1610 if (id & DMA_ID_AW_36B)
1611 d->flags |= DMA_ADDR_36BIT;
1612
1613 if (IS_ENABLED(CONFIG_64BIT) && (id & DMA_ID_AW_36B))
1614 bitn = 36;
1615
1616 if (id & DMA_ID_DW_128B)
1617 d->flags |= DMA_DATA_128BIT;
1618
1619 ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(bitn));
1620 if (ret) {
1621 dev_err(dev, "No usable DMA configuration\n");
1622 return ret;
1623 }
1624
1625 if (d->ver == DMA_VER22) {
1626 ret = ldma_init_v22(d, pdev);
1627 if (ret)
1628 return ret;
1629 }
1630
1631 ret = device_property_read_u32(dev, "dma-channel-mask", &d->channels_mask);
1632 if (ret < 0)
1633 d->channels_mask = GENMASK(d->chan_nrs - 1, 0);
1634
1635 dma_dev = &d->dma_dev;
1636
1637 dma_cap_zero(dma_dev->cap_mask);
1638 dma_cap_set(DMA_SLAVE, dma_dev->cap_mask);
1639
1640 /* Channel initializations */
1641 INIT_LIST_HEAD(&dma_dev->channels);
1642
1643 /* Port Initializations */
1644 d->ports = devm_kcalloc(dev, d->port_nrs, sizeof(*p), GFP_KERNEL);
1645 if (!d->ports)
1646 return -ENOMEM;
1647
1648 /* Channels Initializations */
1649 d->chans = devm_kcalloc(d->dev, d->chan_nrs, sizeof(*c), GFP_KERNEL);
1650 if (!d->chans)
1651 return -ENOMEM;
1652
1653 for (i = 0; i < d->port_nrs; i++) {
1654 p = &d->ports[i];
1655 p->portid = i;
1656 p->ldev = d;
1657 }
1658
1659 dma_dev->dev = &pdev->dev;
1660
1661 ch_mask = (unsigned long)d->channels_mask;
1662 for_each_set_bit(j, &ch_mask, d->chan_nrs) {
1663 if (d->ver == DMA_VER22)
1664 ldma_dma_init_v22(j, d);
1665 else
1666 ldma_dma_init_v3X(j, d);
1667 }
1668
1669 ret = ldma_parse_dt(d);
1670 if (ret)
1671 return ret;
1672
1673 dma_dev->device_alloc_chan_resources = ldma_alloc_chan_resources;
1674 dma_dev->device_free_chan_resources = ldma_free_chan_resources;
1675 dma_dev->device_terminate_all = ldma_terminate_all;
1676 dma_dev->device_issue_pending = ldma_issue_pending;
1677 dma_dev->device_tx_status = ldma_tx_status;
1678 dma_dev->device_resume = ldma_resume_chan;
1679 dma_dev->device_pause = ldma_pause_chan;
1680 dma_dev->device_prep_slave_sg = ldma_prep_slave_sg;
1681
1682 if (d->ver == DMA_VER22) {
1683 dma_dev->device_config = ldma_slave_config;
1684 dma_dev->device_synchronize = ldma_synchronize;
1685 dma_dev->src_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_4_BYTES);
1686 dma_dev->dst_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_4_BYTES);
1687 dma_dev->directions = BIT(DMA_MEM_TO_DEV) |
1688 BIT(DMA_DEV_TO_MEM);
1689 dma_dev->residue_granularity =
1690 DMA_RESIDUE_GRANULARITY_DESCRIPTOR;
1691 }
1692
1693 platform_set_drvdata(pdev, d);
1694
1695 ldma_dev_init(d);
1696
1697 ret = dma_async_device_register(dma_dev);
1698 if (ret) {
1699 dev_err(dev, "Failed to register slave DMA engine device\n");
1700 return ret;
1701 }
1702
1703 ret = of_dma_controller_register(pdev->dev.of_node, ldma_xlate, d);
1704 if (ret) {
1705 dev_err(dev, "Failed to register of DMA controller\n");
1706 dma_async_device_unregister(dma_dev);
1707 return ret;
1708 }
1709
1710 dev_info(dev, "Init done - rev: %x, ports: %d channels: %d\n", d->ver,
1711 d->port_nrs, d->chan_nrs);
1712
1713 return 0;
1714 }
1715
static struct platform_driver intel_ldma_driver = {
	.probe = intel_ldma_probe,
	.driver = {
		.name = DRIVER_NAME,
		.of_match_table = intel_ldma_match,
	},
};

/*
 * Register as a device_initcall so that this driver initializes before its
 * DMA clients (some of which are platform specific), making the registered
 * DMA channels and capabilities available before client initialization.
 */
builtin_platform_driver(intel_ldma_driver);
1731