ste_dma40.c (old: 69f93faa57ed6c91b32aae1dcff7282fcb2872f5) ste_dma40.c (new: 698e4732e7c9cf9f1f3eac2b8cdce8d4fe2b90bd)
1/*
2 * Copyright (C) ST-Ericsson SA 2007-2010
3 * Author: Per Friden <per.friden@stericsson.com> for ST-Ericsson
4 * Author: Jonas Aaberg <jonas.aberg@stericsson.com> for ST-Ericsson
5 * License terms: GNU General Public License (GPL) version 2
6 */
7
8#include <linux/kernel.h>
9#include <linux/slab.h>
10#include <linux/dmaengine.h>
11#include <linux/platform_device.h>
12#include <linux/clk.h>
13#include <linux/delay.h>
1/*
2 * Copyright (C) ST-Ericsson SA 2007-2010
3 * Author: Per Friden <per.friden@stericsson.com> for ST-Ericsson
4 * Author: Jonas Aaberg <jonas.aberg@stericsson.com> for ST-Ericsson
5 * License terms: GNU General Public License (GPL) version 2
6 */
7
8#include <linux/kernel.h>
9#include <linux/slab.h>
10#include <linux/dmaengine.h>
11#include <linux/platform_device.h>
12#include <linux/clk.h>
13#include <linux/delay.h>
14#include <linux/err.h>
14
15#include <plat/ste_dma40.h>
16
17#include "ste_dma40_ll.h"
18
19#define D40_NAME "dma40"
20
21#define D40_PHY_CHAN -1
22
23/* For masking out/in 2-bit channel positions */
24#define D40_CHAN_POS(chan) (2 * (chan / 2))
25#define D40_CHAN_POS_MASK(chan) (0x3 << D40_CHAN_POS(chan))
26
27/* Maximum iterations before giving up on suspending a channel */
28#define D40_SUSPEND_MAX_IT 500
29
30/* Hardware requirement on LCLA alignment */
31#define LCLA_ALIGNMENT 0x40000
15
16#include <plat/ste_dma40.h>
17
18#include "ste_dma40_ll.h"
19
20#define D40_NAME "dma40"
21
22#define D40_PHY_CHAN -1
23
24/* For masking out/in 2-bit channel positions */
25#define D40_CHAN_POS(chan) (2 * (chan / 2))
26#define D40_CHAN_POS_MASK(chan) (0x3 << D40_CHAN_POS(chan))
27
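/*
 * Illustrative example, not part of either revision: the driver appears to
 * keep even-numbered channels in one status register and odd-numbered ones
 * in its twin (the register selection itself sits in a hidden region), so a
 * channel pair (2k, 2k + 1) shares bit position 2k. For channel 5:
 * D40_CHAN_POS(5) == 2 * (5 / 2) == 4, and D40_CHAN_POS_MASK(5) == 0x30,
 * i.e. the 2-bit field at bits 5:4.
 */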
28/* Maximum iterations before giving up on suspending a channel */
29#define D40_SUSPEND_MAX_IT 500
30
31/* Hardware requirement on LCLA alignment */
32#define LCLA_ALIGNMENT 0x40000
33
34/* Max number of links per event group */
35#define D40_LCLA_LINK_PER_EVENT_GRP 128
36#define D40_LCLA_END D40_LCLA_LINK_PER_EVENT_GRP
37
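/*
 * Illustrative arithmetic, not part of either revision: the new LCLA
 * allocation map (d40_lcla_pool.alloc_map below) holds one descriptor
 * pointer per link, so physical channel n owns the index range
 * [n * D40_LCLA_LINK_PER_EVENT_GRP, (n + 1) * D40_LCLA_LINK_PER_EVENT_GRP),
 * which is how d40_lcla_alloc_one() computes its base offset p.
 */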
32/* Attempts made before giving up on getting aligned pages */
33#define MAX_LCLA_ALLOC_ATTEMPTS 256
34
35/* Bit markings for allocation map */
36#define D40_ALLOC_FREE (1 << 31)
37#define D40_ALLOC_PHY (1 << 30)
38#define D40_ALLOC_LOG_FREE 0
39

--- 36 unchanged lines hidden ---

76 * struct d40_desc - A descriptor is one DMA job.
77 *
78 * @lli_phy: LLI settings for physical channel. Both src and dst
79 * point into the lli_pool, to base if lli_len > 1 or to pre_alloc_lli if
80 * lli_len equals one.
81 * @lli_log: Same as above but for logical channels.
82 * @lli_pool: The pool with two entries pre-allocated.
83 * @lli_len: Number of llis of current descriptor.
38/* Attempts made before giving up on getting aligned pages */
39#define MAX_LCLA_ALLOC_ATTEMPTS 256
40
41/* Bit markings for allocation map */
42#define D40_ALLOC_FREE (1 << 31)
43#define D40_ALLOC_PHY (1 << 30)
44#define D40_ALLOC_LOG_FREE 0
45
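/*
 * Sketch of how the allocation markings are consumed, assuming the
 * conventions visible in d40_alloc_mask_free() further down: a fully free
 * resource holds D40_ALLOC_FREE, a physically owned one holds
 * D40_ALLOC_PHY, and logical users set per-event bits on top of
 * D40_ALLOC_LOG_FREE. Not part of either revision.
 */
static inline bool d40_alloc_is_free_sketch(u32 allocated)
{
	/* Only a resource marked free may be claimed as a physical channel. */
	return allocated == D40_ALLOC_FREE;
}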

--- 36 unchanged lines hidden ---

82 * struct d40_desc - A descriptor is one DMA job.
83 *
84 * @lli_phy: LLI settings for physical channel. Both src and dst
85 * point into the lli_pool, to base if lli_len > 1 or to pre_alloc_lli if
86 * lli_len equals one.
87 * @lli_log: Same as above but for logical channels.
88 * @lli_pool: The pool with two entries pre-allocated.
89 * @lli_len: Number of llis of current descriptor.
84 * @lli_count: Number of transferred llis.
85 * @lli_tx_len: Max number of LLIs per transfer; there can be
86 * many transfers for one descriptor.
90 * @lli_current: Number of transferred llis.
91 * @lcla_alloc: Number of LCLA entries allocated.
87 * @txd: DMA engine struct. Used, among other things, for
88 * communication during a transfer.
89 * @node: List entry.
90 * @is_in_client_list: true if the client owns this descriptor.
91 * @is_hw_linked: true if this job will automatically be continued
92 * by the previous one.
93 *
94 * This descriptor is used for both logical and physical transfers.
95 */
92 * @txd: DMA engine struct. Used, among other things, for
93 * communication during a transfer.
94 * @node: List entry.
95 * @is_in_client_list: true if the client owns this descriptor.
96 * @is_hw_linked: true if this job will automatically be continued
97 * by the previous one.
98 *
99 * This descriptor is used for both logical and physical transfers.
100 */
96
97struct d40_desc {
98 /* LLI physical */
99 struct d40_phy_lli_bidir lli_phy;
100 /* LLI logical */
101 struct d40_log_lli_bidir lli_log;
102
103 struct d40_lli_pool lli_pool;
104 int lli_len;
101struct d40_desc {
102 /* LLI physical */
103 struct d40_phy_lli_bidir lli_phy;
104 /* LLI logical */
105 struct d40_log_lli_bidir lli_log;
106
107 struct d40_lli_pool lli_pool;
108 int lli_len;
105 int lli_count;
106 u32 lli_tx_len;
109 int lli_current;
110 int lcla_alloc;
107
108 struct dma_async_tx_descriptor txd;
109 struct list_head node;
110
111 bool is_in_client_list;
112 bool is_hw_linked;
113};
114
115/**
116 * struct d40_lcla_pool - LCLA pool settings and data.
117 *
118 * @base: The virtual address of LCLA. Aligned to 2^18 (256 KiB).
119 * @base_unaligned: The original kmalloc pointer, if kmalloc is used.
120 * This pointer is only there for clean-up on error.
121 * @pages: The number of pages needed for all physical channels.
122 * Only used later for clean-up on error.
123 * @lock: Lock to protect the content in this struct.
111
112 struct dma_async_tx_descriptor txd;
113 struct list_head node;
114
115 bool is_in_client_list;
116 bool is_hw_linked;
117};
118
119/**
120 * struct d40_lcla_pool - LCLA pool settings and data.
121 *
122 * @base: The virtual address of LCLA. Aligned to 2^18 (256 KiB).
123 * @base_unaligned: The original kmalloc pointer, if kmalloc is used.
124 * This pointer is only there for clean-up on error.
125 * @pages: The number of pages needed for all physical channels.
126 * Only used later for clean-up on error.
127 * @lock: Lock to protect the content in this struct.
124 * @alloc_map: Bitmap mapping physical channels to LCLA entries.
125 * @num_blocks: The number of entries in alloc_map. Equals the
126 * number of physical channels.
128 * @alloc_map: Map of which job owns each LCLA entry.
127 */
128struct d40_lcla_pool {
129 void *base;
130 void *base_unaligned;
131 int pages;
132 spinlock_t lock;
129 */
130struct d40_lcla_pool {
131 void *base;
132 void *base_unaligned;
133 int pages;
134 spinlock_t lock;
133 u32 *alloc_map;
134 int num_blocks;
135 struct d40_desc **alloc_map;
135};
136
137/**
138 * struct d40_phy_res - struct for handling eventlines mapped to physical
139 * channels.
140 *
141 * @lock: A lock protecting this entity.
142 * @num: The physical channel number of this entity.

--- 54 unchanged lines hidden ---

197 struct list_head active;
198 struct list_head queue;
199 struct stedma40_chan_cfg dma_cfg;
200 struct d40_base *base;
201 /* Default register configurations */
202 u32 src_def_cfg;
203 u32 dst_def_cfg;
204 struct d40_def_lcsp log_def;
136};
137
138/**
139 * struct d40_phy_res - struct for handling eventlines mapped to physical
140 * channels.
141 *
142 * @lock: A lock protecting this entity.
143 * @num: The physical channel number of this entity.

--- 54 unchanged lines hidden ---

198 struct list_head active;
199 struct list_head queue;
200 struct stedma40_chan_cfg dma_cfg;
201 struct d40_base *base;
202 /* Default register configurations */
203 u32 src_def_cfg;
204 u32 dst_def_cfg;
205 struct d40_def_lcsp log_def;
205 struct d40_lcla_elem lcla;
206 struct d40_log_lli_full *lcpa;
207 /* Runtime reconfiguration */
208 dma_addr_t runtime_addr;
209 enum dma_data_direction runtime_direction;
210};
211
212/**
213 * struct d40_base - The big global struct, one for each probed instance.

--- 132 unchanged lines hidden ---

346 d40d->lli_pool.base = NULL;
347 d40d->lli_pool.size = 0;
348 d40d->lli_log.src = NULL;
349 d40d->lli_log.dst = NULL;
350 d40d->lli_phy.src = NULL;
351 d40d->lli_phy.dst = NULL;
352}
353
206 struct d40_log_lli_full *lcpa;
207 /* Runtime reconfiguration */
208 dma_addr_t runtime_addr;
209 enum dma_data_direction runtime_direction;
210};
211
212/**
213 * struct d40_base - The big global struct, one for each probed instance.

--- 132 unchanged lines hidden ---

346 d40d->lli_pool.base = NULL;
347 d40d->lli_pool.size = 0;
348 d40d->lli_log.src = NULL;
349 d40d->lli_log.dst = NULL;
350 d40d->lli_phy.src = NULL;
351 d40d->lli_phy.dst = NULL;
352}
353
354static int d40_lcla_alloc_one(struct d40_chan *d40c,
355 struct d40_desc *d40d)
356{
357 unsigned long flags;
358 int i;
359 int ret = -EINVAL;
360 int p;
361
362 spin_lock_irqsave(&d40c->base->lcla_pool.lock, flags);
363
364 p = d40c->phy_chan->num * D40_LCLA_LINK_PER_EVENT_GRP;
365
366 /*
367 * Allocate both src and dst at the same time; therefore the halves
368 * start at 1, since entry 0 cannot be used (zero marks the end).
369 */
370 for (i = 1 ; i < D40_LCLA_LINK_PER_EVENT_GRP / 2; i++) {
371 if (!d40c->base->lcla_pool.alloc_map[p + i]) {
372 d40c->base->lcla_pool.alloc_map[p + i] = d40d;
373 d40d->lcla_alloc++;
374 ret = i;
375 break;
376 }
377 }
378
379 spin_unlock_irqrestore(&d40c->base->lcla_pool.lock, flags);
380
381 return ret;
382}
383
384static int d40_lcla_free_all(struct d40_chan *d40c,
385 struct d40_desc *d40d)
386{
387 unsigned long flags;
388 int i;
389 int ret = -EINVAL;
390
391 if (d40c->log_num == D40_PHY_CHAN)
392 return 0;
393
394 spin_lock_irqsave(&d40c->base->lcla_pool.lock, flags);
395
396 for (i = 1 ; i < D40_LCLA_LINK_PER_EVENT_GRP / 2; i++) {
397 if (d40c->base->lcla_pool.alloc_map[d40c->phy_chan->num *
398 D40_LCLA_LINK_PER_EVENT_GRP + i] == d40d) {
399 d40c->base->lcla_pool.alloc_map[d40c->phy_chan->num *
400 D40_LCLA_LINK_PER_EVENT_GRP + i] = NULL;
401 d40d->lcla_alloc--;
402 if (d40d->lcla_alloc == 0) {
403 ret = 0;
404 break;
405 }
406 }
407 }
408
409 spin_unlock_irqrestore(&d40c->base->lcla_pool.lock, flags);
410
411 return ret;
412
413}
414
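/*
 * Illustrative pairing of the two helpers above, not part of either
 * revision: d40_desc_load() (below) calls d40_lcla_alloc_one() once per
 * extra LLI, and every slot is handed back in one go when the job is done.
 */
static void d40_lcla_usage_sketch(struct d40_chan *d40c, struct d40_desc *d40d)
{
	int slot = d40_lcla_alloc_one(d40c, d40d);

	if (slot < 0)
		return;	/* pool exhausted: caller falls back to LCPA only */

	/* ... hardware links would be written into this slot here ... */

	(void) d40_lcla_free_all(d40c, d40d);	/* releases every slot of d40d */
}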
354static void d40_desc_remove(struct d40_desc *d40d)
355{
356 list_del(&d40d->node);
357}
358
359static struct d40_desc *d40_desc_get(struct d40_chan *d40c)
360{
361 struct d40_desc *d;

--- 13 unchanged lines hidden ---

375 INIT_LIST_HEAD(&d->node);
376 }
377 }
378 return d;
379}
380
381static void d40_desc_free(struct d40_chan *d40c, struct d40_desc *d40d)
382{
415static void d40_desc_remove(struct d40_desc *d40d)
416{
417 list_del(&d40d->node);
418}
419
420static struct d40_desc *d40_desc_get(struct d40_chan *d40c)
421{
422 struct d40_desc *d;

--- 13 unchanged lines hidden ---

436 INIT_LIST_HEAD(&d->node);
437 }
438 }
439 return d;
440}
441
442static void d40_desc_free(struct d40_chan *d40c, struct d40_desc *d40d)
443{
444
445 d40_lcla_free_all(d40c, d40d);
383 kmem_cache_free(d40c->base->desc_slab, d40d);
384}
385
386static void d40_desc_submit(struct d40_chan *d40c, struct d40_desc *desc)
387{
388 list_add_tail(&desc->node, &d40c->active);
389}
390
446 kmem_cache_free(d40c->base->desc_slab, d40d);
447}
448
449static void d40_desc_submit(struct d40_chan *d40c, struct d40_desc *desc)
450{
451 list_add_tail(&desc->node, &d40c->active);
452}
453
454static void d40_desc_load(struct d40_chan *d40c, struct d40_desc *d40d)
455{
456 int curr_lcla = -EINVAL, next_lcla;
457
458 if (d40c->log_num == D40_PHY_CHAN) {
459 d40_phy_lli_write(d40c->base->virtbase,
460 d40c->phy_chan->num,
461 d40d->lli_phy.dst,
462 d40d->lli_phy.src);
463 d40d->lli_current = d40d->lli_len;
464 } else {
465
466 if ((d40d->lli_len - d40d->lli_current) > 1)
467 curr_lcla = d40_lcla_alloc_one(d40c, d40d);
468
469 d40_log_lli_lcpa_write(d40c->lcpa,
470 &d40d->lli_log.dst[d40d->lli_current],
471 &d40d->lli_log.src[d40d->lli_current],
472 curr_lcla);
473
474 d40d->lli_current++;
475 for (; d40d->lli_current < d40d->lli_len; d40d->lli_current++) {
476 struct d40_log_lli *lcla;
477
478 if (d40d->lli_current + 1 < d40d->lli_len)
479 next_lcla = d40_lcla_alloc_one(d40c, d40d);
480 else
481 next_lcla = -EINVAL;
482
483 lcla = d40c->base->lcla_pool.base +
484 d40c->phy_chan->num * 1024 +
485 8 * curr_lcla * 2;
486
487 d40_log_lli_lcla_write(lcla,
488 &d40d->lli_log.dst[d40d->lli_current],
489 &d40d->lli_log.src[d40d->lli_current],
490 next_lcla);
491
492 (void) dma_map_single(d40c->base->dev, lcla,
493 2 * sizeof(struct d40_log_lli),
494 DMA_TO_DEVICE);
495
496 curr_lcla = next_lcla;
497
498 if (curr_lcla == -EINVAL) {
499 d40d->lli_current++;
500 break;
501 }
502
503 }
504 }
505}
506
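/*
 * Worked example of the LCLA addressing in d40_desc_load() above, assuming
 * sizeof(struct d40_log_lli) == 8 (two 32-bit words): each physical channel
 * owns 1024 bytes of LCLA and each link slot carries a dst/src lli pair
 * (2 * 8 == 16 bytes), so slot curr_lcla of channel n starts at
 * base + n * 1024 + 16 * curr_lcla, matching the expression
 * "num * 1024 + 8 * curr_lcla * 2". 1024 / 16 == 64 slots per channel,
 * agreeing with the D40_LCLA_LINK_PER_EVENT_GRP / 2 loop bound in
 * d40_lcla_alloc_one(). Not part of either revision.
 */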
391static struct d40_desc *d40_first_active_get(struct d40_chan *d40c)
392{
393 struct d40_desc *d;
394
395 if (list_empty(&d40c->active))
396 return NULL;
397
398 d = list_first_entry(&d40c->active,

--- 29 unchanged lines hidden ---

428 list_for_each_entry(d, &d40c->queue, node)
429 if (list_is_last(&d->node, &d40c->queue))
430 break;
431 return d;
432}
433
434/* Support functions for logical channels */
435
507static struct d40_desc *d40_first_active_get(struct d40_chan *d40c)
508{
509 struct d40_desc *d;
510
511 if (list_empty(&d40c->active))
512 return NULL;
513
514 d = list_first_entry(&d40c->active,

--- 29 unchanged lines hidden ---

544 list_for_each_entry(d, &d40c->queue, node)
545 if (list_is_last(&d->node, &d40c->queue))
546 break;
547 return d;
548}
549
550/* Support functions for logical channels */
551
436static int d40_lcla_id_get(struct d40_chan *d40c)
437{
438 int src_id = 0;
439 int dst_id = 0;
440 struct d40_log_lli *lcla_lidx_base =
441 d40c->base->lcla_pool.base + d40c->phy_chan->num * 1024;
442 int i;
443 int lli_per_log = d40c->base->plat_data->llis_per_log;
444 unsigned long flags;
445
552
446 if (d40c->lcla.src_id >= 0 && d40c->lcla.dst_id >= 0)
447 return 0;
448
449 if (d40c->base->lcla_pool.num_blocks > 32)
450 return -EINVAL;
451
452 spin_lock_irqsave(&d40c->base->lcla_pool.lock, flags);
453
454 for (i = 0; i < d40c->base->lcla_pool.num_blocks; i++) {
455 if (!(d40c->base->lcla_pool.alloc_map[d40c->phy_chan->num] &
456 (0x1 << i))) {
457 d40c->base->lcla_pool.alloc_map[d40c->phy_chan->num] |=
458 (0x1 << i);
459 break;
460 }
461 }
462 src_id = i;
463 if (src_id >= d40c->base->lcla_pool.num_blocks)
464 goto err;
465
466 for (; i < d40c->base->lcla_pool.num_blocks; i++) {
467 if (!(d40c->base->lcla_pool.alloc_map[d40c->phy_chan->num] &
468 (0x1 << i))) {
469 d40c->base->lcla_pool.alloc_map[d40c->phy_chan->num] |=
470 (0x1 << i);
471 break;
472 }
473 }
474
475 dst_id = i;
476 if (dst_id == src_id)
477 goto err;
478
479 d40c->lcla.src_id = src_id;
480 d40c->lcla.dst_id = dst_id;
481 d40c->lcla.dst = lcla_lidx_base + dst_id * lli_per_log + 1;
482 d40c->lcla.src = lcla_lidx_base + src_id * lli_per_log + 1;
483
484 spin_unlock_irqrestore(&d40c->base->lcla_pool.lock, flags);
485 return 0;
486err:
487 spin_unlock_irqrestore(&d40c->base->lcla_pool.lock, flags);
488 return -EINVAL;
489}
490
491
492static int d40_channel_execute_command(struct d40_chan *d40c,
493 enum d40_command command)
494{
495 u32 status;
496 int i;
497 void __iomem *active_reg;
498 int ret = 0;
499 unsigned long flags;

--- 51 unchanged lines hidden ---

551done:
552 spin_unlock_irqrestore(&d40c->base->execmd_lock, flags);
553 return ret;
554}
555
556static void d40_term_all(struct d40_chan *d40c)
557{
558 struct d40_desc *d40d;
553static int d40_channel_execute_command(struct d40_chan *d40c,
554 enum d40_command command)
555{
556 u32 status;
557 int i;
558 void __iomem *active_reg;
559 int ret = 0;
560 unsigned long flags;

--- 51 unchanged lines hidden ---

612done:
613 spin_unlock_irqrestore(&d40c->base->execmd_lock, flags);
614 return ret;
615}
616
617static void d40_term_all(struct d40_chan *d40c)
618{
619 struct d40_desc *d40d;
559 unsigned long flags;
560
561 /* Release active descriptors */
562 while ((d40d = d40_first_active_get(d40c))) {
563 d40_desc_remove(d40d);
564 d40_desc_free(d40c, d40d);
565 }
566
567 /* Release queued descriptors waiting for transfer */
568 while ((d40d = d40_first_queued(d40c))) {
569 d40_desc_remove(d40d);
570 d40_desc_free(d40c, d40d);
571 }
572
620
621 /* Release active descriptors */
622 while ((d40d = d40_first_active_get(d40c))) {
623 d40_desc_remove(d40d);
624 d40_desc_free(d40c, d40d);
625 }
626
627 /* Release queued descriptors waiting for transfer */
628 while ((d40d = d40_first_queued(d40c))) {
629 d40_desc_remove(d40d);
630 d40_desc_free(d40c, d40d);
631 }
632
573 spin_lock_irqsave(&d40c->base->lcla_pool.lock, flags);
574
633
575 d40c->base->lcla_pool.alloc_map[d40c->phy_chan->num] &=
576 (~(0x1 << d40c->lcla.dst_id));
577 d40c->base->lcla_pool.alloc_map[d40c->phy_chan->num] &=
578 (~(0x1 << d40c->lcla.src_id));
579
580 d40c->lcla.src_id = -1;
581 d40c->lcla.dst_id = -1;
582
583 spin_unlock_irqrestore(&d40c->base->lcla_pool.lock, flags);
584
585 d40c->pending_tx = 0;
586 d40c->busy = false;
587}
588
589static void d40_config_set_event(struct d40_chan *d40c, bool do_enable)
590{
591 u32 val;
592 unsigned long flags;

--- 84 unchanged lines hidden ---

677 D40_SREG_ELEM_LOG_LIDX_MASK,
678 d40c->base->virtbase + D40_DREG_PCBASE +
679 d40c->phy_chan->num * D40_DREG_PCDELTA +
680 D40_CHAN_REG_SSELT);
681
682 }
683}
684
634 d40c->pending_tx = 0;
635 d40c->busy = false;
636}
637
638static void d40_config_set_event(struct d40_chan *d40c, bool do_enable)
639{
640 u32 val;
641 unsigned long flags;

--- 84 unchanged lines hidden ---

726 D40_SREG_ELEM_LOG_LIDX_MASK,
727 d40c->base->virtbase + D40_DREG_PCBASE +
728 d40c->phy_chan->num * D40_DREG_PCDELTA +
729 D40_CHAN_REG_SSELT);
730
731 }
732}
733
685static void d40_desc_load(struct d40_chan *d40c, struct d40_desc *d40d)
686{
687 if (d40c->log_num == D40_PHY_CHAN) {
688 d40_phy_lli_write(d40c->base->virtbase,
689 d40c->phy_chan->num,
690 d40d->lli_phy.dst,
691 d40d->lli_phy.src);
692 } else {
693 struct d40_log_lli *src = d40d->lli_log.src;
694 struct d40_log_lli *dst = d40d->lli_log.dst;
695 int s;
696
697 src += d40d->lli_count;
698 dst += d40d->lli_count;
699 s = d40_log_lli_write(d40c->lcpa,
700 d40c->lcla.src, d40c->lcla.dst,
701 dst, src,
702 d40c->base->plat_data->llis_per_log);
703
704 /* If s equals zero, the job is not linked */
705 if (s > 0) {
706 (void) dma_map_single(d40c->base->dev, d40c->lcla.src,
707 s * sizeof(struct d40_log_lli),
708 DMA_TO_DEVICE);
709 (void) dma_map_single(d40c->base->dev, d40c->lcla.dst,
710 s * sizeof(struct d40_log_lli),
711 DMA_TO_DEVICE);
712 }
713 }
714 d40d->lli_count += d40d->lli_tx_len;
715}
716
717static u32 d40_residue(struct d40_chan *d40c)
718{
719 u32 num_elt;
720
721 if (d40c->log_num != D40_PHY_CHAN)
722 num_elt = (readl(&d40c->lcpa->lcsp2) & D40_MEM_LCSP2_ECNT_MASK)
723 >> D40_MEM_LCSP2_ECNT_POS;
724 else

--- 212 unchanged lines hidden ---

937
938 /* Add to active queue */
939 d40_desc_submit(d40c, d40d);
940
941 /*
942 * If this job is already linked in hw,
943 * do not submit it.
944 */
734static u32 d40_residue(struct d40_chan *d40c)
735{
736 u32 num_elt;
737
738 if (d40c->log_num != D40_PHY_CHAN)
739 num_elt = (readl(&d40c->lcpa->lcsp2) & D40_MEM_LCSP2_ECNT_MASK)
740 >> D40_MEM_LCSP2_ECNT_POS;
741 else

--- 212 unchanged lines hidden ---

954
955 /* Add to active queue */
956 d40_desc_submit(d40c, d40d);
957
958 /*
959 * If this job is already linked in hw,
960 * do not submit it.
961 */
962
945 if (!d40d->is_hw_linked) {
946 /* Initiate DMA job */
947 d40_desc_load(d40c, d40d);
948
949 /* Start dma job */
950 err = d40_start(d40c);
951
952 if (err)

--- 10 unchanged lines hidden ---

963 struct d40_desc *d40d;
964
965 /* Get first active entry from list */
966 d40d = d40_first_active_get(d40c);
967
968 if (d40d == NULL)
969 return;
970
963 if (!d40d->is_hw_linked) {
964 /* Initiate DMA job */
965 d40_desc_load(d40c, d40d);
966
967 /* Start dma job */
968 err = d40_start(d40c);
969
970 if (err)

--- 10 unchanged lines hidden ---

981 struct d40_desc *d40d;
982
983 /* Get first active entry from list */
984 d40d = d40_first_active_get(d40c);
985
986 if (d40d == NULL)
987 return;
988
971 if (d40d->lli_count < d40d->lli_len) {
989 d40_lcla_free_all(d40c, d40d);
972
990
991 if (d40d->lli_current < d40d->lli_len) {
973 d40_desc_load(d40c, d40d);
974 /* Start dma job */
975 (void) d40_start(d40c);
976 return;
977 }
978
979 if (d40_queue_start(d40c) == NULL)
980 d40c->busy = false;

--- 36 unchanged lines hidden ---

1017
1018 if (async_tx_test_ack(&d40d->txd)) {
1019 d40_pool_lli_free(d40d);
1020 d40_desc_remove(d40d);
1021 d40_desc_free(d40c, d40d);
1022 } else {
1023 if (!d40d->is_in_client_list) {
1024 d40_desc_remove(d40d);
992 d40_desc_load(d40c, d40d);
993 /* Start dma job */
994 (void) d40_start(d40c);
995 return;
996 }
997
998 if (d40_queue_start(d40c) == NULL)
999 d40c->busy = false;

--- 36 unchanged lines hidden ---

1036
1037 if (async_tx_test_ack(&d40d->txd)) {
1038 d40_pool_lli_free(d40d);
1039 d40_desc_remove(d40d);
1040 d40_desc_free(d40c, d40d);
1041 } else {
1042 if (!d40d->is_in_client_list) {
1043 d40_desc_remove(d40d);
1044 d40_lcla_free_all(d40c, d40d);
1025 list_add_tail(&d40d->node, &d40c->client);
1026 d40d->is_in_client_list = true;
1027 }
1028 }
1029
1030 d40c->pending_tx--;
1031
1032 if (d40c->pending_tx)

--- 209 unchanged lines hidden ---

1242static bool d40_alloc_mask_free(struct d40_phy_res *phy, bool is_src,
1243 int log_event_line)
1244{
1245 unsigned long flags;
1246 bool is_free = false;
1247
1248 spin_lock_irqsave(&phy->lock, flags);
1249 if (!log_event_line) {
1045 list_add_tail(&d40d->node, &d40c->client);
1046 d40d->is_in_client_list = true;
1047 }
1048 }
1049
1050 d40c->pending_tx--;
1051
1052 if (d40c->pending_tx)

--- 209 unchanged lines hidden ---

1262static bool d40_alloc_mask_free(struct d40_phy_res *phy, bool is_src,
1263 int log_event_line)
1264{
1265 unsigned long flags;
1266 bool is_free = false;
1267
1268 spin_lock_irqsave(&phy->lock, flags);
1269 if (!log_event_line) {
1250 /* Physical interrupts are masked per full physical channel */
1251 phy->allocated_dst = D40_ALLOC_FREE;
1252 phy->allocated_src = D40_ALLOC_FREE;
1253 is_free = true;
1254 goto out;
1255 }
1256
1257 /* Logical channel */
1258 if (is_src) {

--- 369 unchanged lines hidden ---

1628
1629 spin_lock_irqsave(&d40c->lock, flags);
1630 d40d = d40_desc_get(d40c);
1631
1632 if (d40d == NULL)
1633 goto err;
1634
1635 d40d->lli_len = sgl_len;
1270 phy->allocated_dst = D40_ALLOC_FREE;
1271 phy->allocated_src = D40_ALLOC_FREE;
1272 is_free = true;
1273 goto out;
1274 }
1275
1276 /* Logical channel */
1277 if (is_src) {

--- 369 unchanged lines hidden ---

1647
1648 spin_lock_irqsave(&d40c->lock, flags);
1649 d40d = d40_desc_get(d40c);
1650
1651 if (d40d == NULL)
1652 goto err;
1653
1654 d40d->lli_len = sgl_len;
1636 d40d->lli_tx_len = d40d->lli_len;
1655 d40d->lli_current = 0;
1637 d40d->txd.flags = dma_flags;
1638
1639 if (d40c->log_num != D40_PHY_CHAN) {
1656 d40d->txd.flags = dma_flags;
1657
1658 if (d40c->log_num != D40_PHY_CHAN) {
1640 if (d40d->lli_len > d40c->base->plat_data->llis_per_log)
1641 d40d->lli_tx_len = d40c->base->plat_data->llis_per_log;
1642
1659
1643 if (sgl_len > 1)
1644 /*
1645 * Check if there is space available in LCLA. If not,
1646 * split the list into 1-length chunks and run only
1647 * in LCPA space.
1648 */
1649 if (d40_lcla_id_get(d40c) != 0)
1650 d40d->lli_tx_len = 1;
1651
1652 if (d40_pool_lli_alloc(d40d, sgl_len, true) < 0) {
1653 dev_err(&d40c->chan.dev->device,
1654 "[%s] Out of memory\n", __func__);
1655 goto err;
1656 }
1657
1660 if (d40_pool_lli_alloc(d40d, sgl_len, true) < 0) {
1661 dev_err(&d40c->chan.dev->device,
1662 "[%s] Out of memory\n", __func__);
1663 goto err;
1664 }
1665
1658 (void) d40_log_sg_to_lli(d40c->lcla.src_id,
1659 sgl_src,
1666 (void) d40_log_sg_to_lli(sgl_src,
1660 sgl_len,
1661 d40d->lli_log.src,
1662 d40c->log_def.lcsp1,
1667 sgl_len,
1668 d40d->lli_log.src,
1669 d40c->log_def.lcsp1,
1663 d40c->dma_cfg.src_info.data_width,
1664 d40d->lli_tx_len,
1665 d40c->base->plat_data->llis_per_log);
1670 d40c->dma_cfg.src_info.data_width);
1666
1671
1667 (void) d40_log_sg_to_lli(d40c->lcla.dst_id,
1668 sgl_dst,
1672 (void) d40_log_sg_to_lli(sgl_dst,
1669 sgl_len,
1670 d40d->lli_log.dst,
1671 d40c->log_def.lcsp3,
1673 sgl_len,
1674 d40d->lli_log.dst,
1675 d40c->log_def.lcsp3,
1672 d40c->dma_cfg.dst_info.data_width,
1673 d40d->lli_tx_len,
1674 d40c->base->plat_data->llis_per_log);
1675
1676
1676 d40c->dma_cfg.dst_info.data_width);
1677 } else {
1678 if (d40_pool_lli_alloc(d40d, sgl_len, false) < 0) {
1679 dev_err(&d40c->chan.dev->device,
1680 "[%s] Out of memory\n", __func__);
1681 goto err;
1682 }
1683
1684 res = d40_phy_sg_to_lli(sgl_src,

--- 179 unchanged lines hidden ---

1864 if (d40c->log_num != D40_PHY_CHAN) {
1865
1866 if (d40_pool_lli_alloc(d40d, 1, true) < 0) {
1867 dev_err(&d40c->chan.dev->device,
1868 "[%s] Out of memory\n", __func__);
1869 goto err;
1870 }
1871 d40d->lli_len = 1;
1677 } else {
1678 if (d40_pool_lli_alloc(d40d, sgl_len, false) < 0) {
1679 dev_err(&d40c->chan.dev->device,
1680 "[%s] Out of memory\n", __func__);
1681 goto err;
1682 }
1683
1684 res = d40_phy_sg_to_lli(sgl_src,

--- 179 unchanged lines hidden ---

1864 if (d40c->log_num != D40_PHY_CHAN) {
1865
1866 if (d40_pool_lli_alloc(d40d, 1, true) < 0) {
1867 dev_err(&d40c->chan.dev->device,
1868 "[%s] Out of memory\n", __func__);
1869 goto err;
1870 }
1871 d40d->lli_len = 1;
1872 d40d->lli_tx_len = 1;
1872 d40d->lli_current = 0;
1873
1874 d40_log_fill_lli(d40d->lli_log.src,
1875 src,
1876 size,
1873
1874 d40_log_fill_lli(d40d->lli_log.src,
1875 src,
1876 size,
1877 0,
1878 d40c->log_def.lcsp1,
1879 d40c->dma_cfg.src_info.data_width,
1877 d40c->log_def.lcsp1,
1878 d40c->dma_cfg.src_info.data_width,
1880 false, true);
1879 true);
1881
1882 d40_log_fill_lli(d40d->lli_log.dst,
1883 dst,
1884 size,
1880
1881 d40_log_fill_lli(d40d->lli_log.dst,
1882 dst,
1883 size,
1885 0,
1886 d40c->log_def.lcsp3,
1887 d40c->dma_cfg.dst_info.data_width,
1884 d40c->log_def.lcsp3,
1885 d40c->dma_cfg.dst_info.data_width,
1888 true, true);
1886 true);
1889
1890 } else {
1891
1892 if (d40_pool_lli_alloc(d40d, 1, false) < 0) {
1893 dev_err(&d40c->chan.dev->device,
1894 "[%s] Out of memory\n", __func__);
1895 goto err;
1896 }

--- 51 unchanged lines hidden ---

1948
1949 if (d40_pool_lli_alloc(d40d, sg_len, true) < 0) {
1950 dev_err(&d40c->chan.dev->device,
1951 "[%s] Out of memory\n", __func__);
1952 return -ENOMEM;
1953 }
1954
1955 d40d->lli_len = sg_len;
1887
1888 } else {
1889
1890 if (d40_pool_lli_alloc(d40d, 1, false) < 0) {
1891 dev_err(&d40c->chan.dev->device,
1892 "[%s] Out of memory\n", __func__);
1893 goto err;
1894 }

--- 51 unchanged lines hidden ---

1946
1947 if (d40_pool_lli_alloc(d40d, sg_len, true) < 0) {
1948 dev_err(&d40c->chan.dev->device,
1949 "[%s] Out of memory\n", __func__);
1950 return -ENOMEM;
1951 }
1952
1953 d40d->lli_len = sg_len;
1956 if (d40d->lli_len <= d40c->base->plat_data->llis_per_log)
1957 d40d->lli_tx_len = d40d->lli_len;
1958 else
1959 d40d->lli_tx_len = d40c->base->plat_data->llis_per_log;
1954 d40d->lli_current = 0;
1960
1955
1961 if (sg_len > 1)
1962 /*
1963 * Check if there is space available in LCLA.
1964 * If not, split the list into 1-length chunks
1965 * and run only in LCPA space.
1966 */
1967 if (d40_lcla_id_get(d40c) != 0)
1968 d40d->lli_tx_len = 1;
1969
1970 if (direction == DMA_FROM_DEVICE)
1971 if (d40c->runtime_addr)
1972 dev_addr = d40c->runtime_addr;
1973 else
1974 dev_addr = d40c->base->plat_data->dev_rx[d40c->dma_cfg.src_dev_type];
1975 else if (direction == DMA_TO_DEVICE)
1976 if (d40c->runtime_addr)
1977 dev_addr = d40c->runtime_addr;
1978 else
1979 dev_addr = d40c->base->plat_data->dev_tx[d40c->dma_cfg.dst_dev_type];
1980
1981 else
1982 return -EINVAL;
1983
1956 if (direction == DMA_FROM_DEVICE)
1957 if (d40c->runtime_addr)
1958 dev_addr = d40c->runtime_addr;
1959 else
1960 dev_addr = d40c->base->plat_data->dev_rx[d40c->dma_cfg.src_dev_type];
1961 else if (direction == DMA_TO_DEVICE)
1962 if (d40c->runtime_addr)
1963 dev_addr = d40c->runtime_addr;
1964 else
1965 dev_addr = d40c->base->plat_data->dev_tx[d40c->dma_cfg.dst_dev_type];
1966
1967 else
1968 return -EINVAL;
1969
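/*
 * The unbraced if/else ladder above picks the device address; an
 * equivalent helper, purely illustrative and not in either revision,
 * would read:
 */
static dma_addr_t d40_dev_addr_sketch(struct d40_chan *d40c,
				      enum dma_data_direction direction)
{
	if (d40c->runtime_addr)
		return d40c->runtime_addr;	/* runtime override wins */

	if (direction == DMA_FROM_DEVICE)
		return d40c->base->plat_data->dev_rx[d40c->dma_cfg.src_dev_type];
	if (direction == DMA_TO_DEVICE)
		return d40c->base->plat_data->dev_tx[d40c->dma_cfg.dst_dev_type];

	return 0;	/* caller treats any other direction as -EINVAL */
}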
1984 total_size = d40_log_sg_to_dev(&d40c->lcla,
1985 sgl, sg_len,
1970 total_size = d40_log_sg_to_dev(sgl, sg_len,
1986 &d40d->lli_log,
1987 &d40c->log_def,
1988 d40c->dma_cfg.src_info.data_width,
1989 d40c->dma_cfg.dst_info.data_width,
1990 direction,
1971 &d40d->lli_log,
1972 &d40c->log_def,
1973 d40c->dma_cfg.src_info.data_width,
1974 d40c->dma_cfg.dst_info.data_width,
1975 direction,
1991 dev_addr, d40d->lli_tx_len,
1992 d40c->base->plat_data->llis_per_log);
1976 dev_addr);
1993
1994 if (total_size < 0)
1995 return -EINVAL;
1996
1997 return 0;
1998}
1999
2000static int d40_prep_slave_sg_phy(struct d40_desc *d40d,

--- 9 unchanged lines hidden ---

2010
2011 if (d40_pool_lli_alloc(d40d, sgl_len, false) < 0) {
2012 dev_err(&d40c->chan.dev->device,
2013 "[%s] Out of memory\n", __func__);
2014 return -ENOMEM;
2015 }
2016
2017 d40d->lli_len = sgl_len;
1977
1978 if (total_size < 0)
1979 return -EINVAL;
1980
1981 return 0;
1982}
1983
1984static int d40_prep_slave_sg_phy(struct d40_desc *d40d,

--- 9 unchanged lines hidden ---

1994
1995 if (d40_pool_lli_alloc(d40d, sgl_len, false) < 0) {
1996 dev_err(&d40c->chan.dev->device,
1997 "[%s] Out of memory\n", __func__);
1998 return -ENOMEM;
1999 }
2000
2001 d40d->lli_len = sgl_len;
2018 d40d->lli_tx_len = sgl_len;
2002 d40d->lli_current = 0;
2019
2020 if (direction == DMA_FROM_DEVICE) {
2021 dst_dev_addr = 0;
2022 if (d40c->runtime_addr)
2023 src_dev_addr = d40c->runtime_addr;
2024 else
2025 src_dev_addr = d40c->base->plat_data->dev_rx[d40c->dma_cfg.src_dev_type];
2026 } else if (direction == DMA_TO_DEVICE) {

--- 291 unchanged lines hidden ---

2318
2319 INIT_LIST_HEAD(&dma->channels);
2320
2321 for (i = offset; i < offset + num_chans; i++) {
2322 d40c = &chans[i];
2323 d40c->base = base;
2324 d40c->chan.device = dma;
2325
2003
2004 if (direction == DMA_FROM_DEVICE) {
2005 dst_dev_addr = 0;
2006 if (d40c->runtime_addr)
2007 src_dev_addr = d40c->runtime_addr;
2008 else
2009 src_dev_addr = d40c->base->plat_data->dev_rx[d40c->dma_cfg.src_dev_type];
2010 } else if (direction == DMA_TO_DEVICE) {

--- 291 unchanged lines hidden ---

2302
2303 INIT_LIST_HEAD(&dma->channels);
2304
2305 for (i = offset; i < offset + num_chans; i++) {
2306 d40c = &chans[i];
2307 d40c->base = base;
2308 d40c->chan.device = dma;
2309
2326 /* Invalidate lcla element */
2327 d40c->lcla.src_id = -1;
2328 d40c->lcla.dst_id = -1;
2329
2330 spin_lock_init(&d40c->lock);
2331
2332 d40c->log_num = D40_PHY_CHAN;
2333
2334 INIT_LIST_HEAD(&d40c->active);
2335 INIT_LIST_HEAD(&d40c->queue);
2336 INIT_LIST_HEAD(&d40c->client);
2337

--- 288 unchanged lines hidden ---

2626 * src devices and dst devices
2627 */
2628 base->lookup_log_chans = kzalloc(plat_data->dev_len * 2 *
2629 sizeof(struct d40_chan *),
2630 GFP_KERNEL);
2631 if (!base->lookup_log_chans)
2632 goto failure;
2633 }
2310 spin_lock_init(&d40c->lock);
2311
2312 d40c->log_num = D40_PHY_CHAN;
2313
2314 INIT_LIST_HEAD(&d40c->active);
2315 INIT_LIST_HEAD(&d40c->queue);
2316 INIT_LIST_HEAD(&d40c->client);
2317

--- 288 unchanged lines hidden ---

2606 * src devices and dst devices
2607 */
2608 base->lookup_log_chans = kzalloc(plat_data->dev_len * 2 *
2609 sizeof(struct d40_chan *),
2610 GFP_KERNEL);
2611 if (!base->lookup_log_chans)
2612 goto failure;
2613 }
2634 base->lcla_pool.alloc_map = kzalloc(num_phy_chans * sizeof(u32),
2614
2615 base->lcla_pool.alloc_map = kzalloc(num_phy_chans *
2616 sizeof(struct d40_desc *) *
2617 D40_LCLA_LINK_PER_EVENT_GRP,
2635 GFP_KERNEL);
2636 if (!base->lcla_pool.alloc_map)
2637 goto failure;
2638
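/*
 * Illustrative sizing of the two maps, not part of either revision: the
 * old map was one u32 bitmap per physical channel; the new one stores
 * num_phy_chans * D40_LCLA_LINK_PER_EVENT_GRP descriptor pointers, e.g.
 * with 8 physical channels that is 8 * 128 * sizeof(void *) == 4 KiB on
 * a 32-bit platform.
 */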
2639 base->desc_slab = kmem_cache_create(D40_NAME, sizeof(struct d40_desc),
2640 0, SLAB_HWCACHE_ALIGN,
2641 NULL);
2642 if (base->desc_slab == NULL)

--- 230 unchanged lines hidden ---

2873 if (ret) {
2874 dev_err(&pdev->dev, "[%s] Failed to allocate LCLA area\n",
2875 __func__);
2876 goto failure;
2877 }
2878
2879 spin_lock_init(&base->lcla_pool.lock);
2880
2618 GFP_KERNEL);
2619 if (!base->lcla_pool.alloc_map)
2620 goto failure;
2621
2622 base->desc_slab = kmem_cache_create(D40_NAME, sizeof(struct d40_desc),
2623 0, SLAB_HWCACHE_ALIGN,
2624 NULL);
2625 if (base->desc_slab == NULL)

--- 230 unchanged lines hidden ---

2856 if (ret) {
2857 dev_err(&pdev->dev, "[%s] Failed to allocate LCLA area\n",
2858 __func__);
2859 goto failure;
2860 }
2861
2862 spin_lock_init(&base->lcla_pool.lock);
2863
2881 base->lcla_pool.num_blocks = base->num_phy_chans;
2882
2883 base->irq = platform_get_irq(pdev, 0);
2884
2885 ret = request_irq(base->irq, d40_handle_interrupt, 0, D40_NAME, base);
2886
2887 if (ret) {
2888 dev_err(&pdev->dev, "[%s] No IRQ defined\n", __func__);
2889 goto failure;
2890 }

--- 56 unchanged lines hidden ---
2864 base->irq = platform_get_irq(pdev, 0);
2865
2866 ret = request_irq(base->irq, d40_handle_interrupt, 0, D40_NAME, base);
2867
2868 if (ret) {
2869 dev_err(&pdev->dev, "[%s] No IRQ defined\n", __func__);
2870 goto failure;
2871 }

--- 56 unchanged lines hidden ---