xref: /linux/drivers/dma/ste_dma40.c (revision d181b3a8cb2fc1732ad1826a5e6fdccab03e6a51)
18d318a50SLinus Walleij /*
 * drivers/dma/ste_dma40.c
38d318a50SLinus Walleij  *
48d318a50SLinus Walleij  * Copyright (C) ST-Ericsson 2007-2010
58d318a50SLinus Walleij  * License terms: GNU General Public License (GPL) version 2
68d318a50SLinus Walleij  * Author: Per Friden <per.friden@stericsson.com>
78d318a50SLinus Walleij  * Author: Jonas Aaberg <jonas.aberg@stericsson.com>
88d318a50SLinus Walleij  *
98d318a50SLinus Walleij  */
108d318a50SLinus Walleij 
118d318a50SLinus Walleij #include <linux/kernel.h>
128d318a50SLinus Walleij #include <linux/slab.h>
138d318a50SLinus Walleij #include <linux/dmaengine.h>
148d318a50SLinus Walleij #include <linux/platform_device.h>
158d318a50SLinus Walleij #include <linux/clk.h>
168d318a50SLinus Walleij #include <linux/delay.h>
178d318a50SLinus Walleij 
188d318a50SLinus Walleij #include <plat/ste_dma40.h>
198d318a50SLinus Walleij 
208d318a50SLinus Walleij #include "ste_dma40_ll.h"
218d318a50SLinus Walleij 
228d318a50SLinus Walleij #define D40_NAME "dma40"
238d318a50SLinus Walleij 
248d318a50SLinus Walleij #define D40_PHY_CHAN -1
258d318a50SLinus Walleij 
268d318a50SLinus Walleij /* For masking out/in 2 bit channel positions */
278d318a50SLinus Walleij #define D40_CHAN_POS(chan)  (2 * (chan / 2))
288d318a50SLinus Walleij #define D40_CHAN_POS_MASK(chan) (0x3 << D40_CHAN_POS(chan))
298d318a50SLinus Walleij 
308d318a50SLinus Walleij /* Maximum iterations taken before giving up suspending a channel */
318d318a50SLinus Walleij #define D40_SUSPEND_MAX_IT 500
328d318a50SLinus Walleij 
33508849adSLinus Walleij /* Hardware requirement on LCLA alignment */
34508849adSLinus Walleij #define LCLA_ALIGNMENT 0x40000
35508849adSLinus Walleij /* Attempts before giving up to trying to get pages that are aligned */
36508849adSLinus Walleij #define MAX_LCLA_ALLOC_ATTEMPTS 256
37508849adSLinus Walleij 
38508849adSLinus Walleij /* Bit markings for allocation map */
398d318a50SLinus Walleij #define D40_ALLOC_FREE		(1 << 31)
408d318a50SLinus Walleij #define D40_ALLOC_PHY		(1 << 30)
418d318a50SLinus Walleij #define D40_ALLOC_LOG_FREE	0
428d318a50SLinus Walleij 
438d318a50SLinus Walleij /* Hardware designer of the block */
448d318a50SLinus Walleij #define D40_PERIPHID2_DESIGNER 0x8
458d318a50SLinus Walleij 
/**
 * enum d40_command - The different commands and/or statuses.
 *
 * @D40_DMA_STOP: DMA channel command STOP or status STOPPED,
 * @D40_DMA_RUN: The DMA channel is RUNNING or the command RUN.
 * @D40_DMA_SUSPEND_REQ: Request the DMA to SUSPEND as soon as possible.
 * @D40_DMA_SUSPENDED: The DMA channel is SUSPENDED.
 */
enum d40_command {
	D40_DMA_STOP		= 0,
	D40_DMA_RUN		= 1,
	D40_DMA_SUSPEND_REQ	= 2,
	D40_DMA_SUSPENDED	= 3
};
608d318a50SLinus Walleij 
/**
 * struct d40_lli_pool - Structure for keeping LLIs in memory
 *
 * @base: Pointer to memory area when the pre_alloc_lli's are not large
 * enough, IE bigger than the most common case, 1 dst and 1 src. NULL if
 * pre_alloc_lli is used.
 * @size: The size in bytes of the memory at base or the size of pre_alloc_lli.
 * @pre_alloc_lli: Pre allocated area for the most common case of transfers,
 * one buffer to one buffer. Sized for 3 LLIs: one dst, one src and one
 * extra entry's worth of space for alignment padding.
 */
struct d40_lli_pool {
	void	*base;
	int	 size;
	/* Space for dst and src, plus an extra for padding */
	u8	 pre_alloc_lli[3 * sizeof(struct d40_phy_lli)];
};
778d318a50SLinus Walleij 
/**
 * struct d40_desc - A descriptor is one DMA job.
 *
 * @lli_phy: LLI settings for physical channel. Both src and dst=
 * points into the lli_pool, to base if lli_len > 1 or to pre_alloc_lli if
 * lli_len equals one.
 * @lli_log: Same as above but for logical channels.
 * @lli_pool: The pool with two entries pre-allocated.
 * @lli_len: Number of llis of current descriptor.
 * @lli_count: Number of transferred llis.
 * @lli_tx_len: Max number of LLIs per transfer, there can be
 * many transfer for one descriptor.
 * @txd: DMA engine struct. Used for among other things for communication
 * during a transfer.
 * @node: List entry.
 * @dir: The transfer direction of this job.
 * @is_in_client_list: true if the client owns this descriptor.
 *
 * This descriptor is used for both logical and physical transfers.
 */
struct d40_desc {
	/* LLI physical */
	struct d40_phy_lli_bidir	 lli_phy;
	/* LLI logical */
	struct d40_log_lli_bidir	 lli_log;

	struct d40_lli_pool		 lli_pool;
	int				 lli_len;
	int				 lli_count;
	u32				 lli_tx_len;

	struct dma_async_tx_descriptor	 txd;
	struct list_head		 node;

	enum dma_data_direction		 dir;
	bool				 is_in_client_list;
};
1168d318a50SLinus Walleij 
/**
 * struct d40_lcla_pool - LCLA pool settings and data.
 *
 * @base: The virtual address of LCLA. 18 bit aligned.
 * @base_unaligned: The original kmalloc pointer, if kmalloc is used.
 * This pointer is only there for clean-up on error.
 * @pages: The number of pages needed for all physical channels.
 * Only used later for clean-up on error
 * @lock: Lock to protect the content in this struct.
 * @alloc_map: Bitmap mapping between physical channel and LCLA entries.
 * One u32 is indexed per physical channel; each bit marks one LCLA block.
 * @num_blocks: The number of entries of alloc_map. Equals to the
 * number of physical channels. NOTE(review): the allocator code uses
 * num_blocks as the per-channel bit count (must be <= 32) — confirm
 * which meaning is intended.
 */
struct d40_lcla_pool {
	void		*base;
	void		*base_unaligned;
	int		 pages;
	spinlock_t	 lock;
	u32		*alloc_map;
	int		 num_blocks;
};
1388d318a50SLinus Walleij 
/**
 * struct d40_phy_res - struct for handling eventlines mapped to physical
 * channels.
 *
 * @lock: A lock protecting this entity.
 * @num: The physical channel number of this entity.
 * @allocated_src: Bit mapped to show which src event line's are mapped to
 * this physical channel. Can also be free or physically allocated.
 * @allocated_dst: Same as for src but is dst.
 * allocated_dst and allocated_src uses the D40_ALLOC* defines as well as
 * event line number. Both allocated_src and allocated_dst can not be
 * allocated to a physical channel, since the interrupt handler then has
 * no way of figuring out which one the interrupt belongs to.
 */
struct d40_phy_res {
	spinlock_t lock;
	int	   num;
	u32	   allocated_src;
	u32	   allocated_dst;
};
1598d318a50SLinus Walleij 
1608d318a50SLinus Walleij struct d40_base;
1618d318a50SLinus Walleij 
/**
 * struct d40_chan - Struct that describes a channel.
 *
 * @lock: A spinlock to protect this struct.
 * @log_num: The logical number, if any of this channel.
 * @completed: Starts with 1, after first interrupt it is set to dma engine's
 * current cookie.
 * @pending_tx: The number of pending transfers. Used between interrupt handler
 * and tasklet.
 * @busy: Set to true when transfer is ongoing on this channel.
 * @phy_chan: Pointer to physical channel which this instance runs on. If this
 * pointer is NULL, then the channel is not allocated.
 * @chan: DMA engine handle.
 * @tasklet: Tasklet that gets scheduled from interrupt context to complete a
 * transfer and call client callback.
 * @client: Client owned descriptor list.
 * @active: Active descriptor.
 * @queue: Queued jobs.
 * @dma_cfg: The client configuration of this dma channel.
 * @base: Pointer to the device instance struct.
 * @src_def_cfg: Default cfg register setting for src.
 * @dst_def_cfg: Default cfg register setting for dst.
 * @log_def: Default logical channel settings.
 * @lcla: Space for one dst src pair for logical channel transfers.
 * @lcpa: Pointer to dst and src lcpa settings.
 *
 * This struct can either "be" a logical or a physical channel.
 */
struct d40_chan {
	spinlock_t			 lock;
	int				 log_num;
	/* ID of the most recent completed transfer */
	int				 completed;
	int				 pending_tx;
	bool				 busy;
	struct d40_phy_res		*phy_chan;
	struct dma_chan			 chan;
	struct tasklet_struct		 tasklet;
	struct list_head		 client;
	struct list_head		 active;
	struct list_head		 queue;
	struct stedma40_chan_cfg	 dma_cfg;
	struct d40_base			*base;
	/* Default register configurations */
	u32				 src_def_cfg;
	u32				 dst_def_cfg;
	struct d40_def_lcsp		 log_def;
	struct d40_lcla_elem		 lcla;
	struct d40_log_lli_full		*lcpa;
};
2128d318a50SLinus Walleij 
/**
 * struct d40_base - The big global struct, one for each probe'd instance.
 *
 * @interrupt_lock: Lock used to make sure one interrupt is handled at a time.
 * @execmd_lock: Lock for execute command usage since several channels share
 * the same physical register.
 * @dev: The device structure.
 * @virtbase: The virtual base address of the DMA's register.
 * @clk: Pointer to the DMA clock structure.
 * @phy_start: Physical memory start of the DMA registers.
 * @phy_size: Size of the DMA register map.
 * @irq: The IRQ number.
 * @num_phy_chans: The number of physical channels. Read from HW. This
 * is the number of available channels for this driver, not counting "Secure
 * mode" allocated physical channels.
 * @num_log_chans: The number of logical channels. Calculated from
 * num_phy_chans.
 * @dma_both: dma_device channels that can do both memcpy and slave transfers.
 * @dma_slave: dma_device channels that can only do slave transfers.
 * @dma_memcpy: dma_device channels that can only do memcpy transfers.
 * @phy_chans: Room for all possible physical channels in system.
 * @log_chans: Room for all possible logical channels in system.
 * @lookup_log_chans: Used to map interrupt number to logical channel. Points
 * to log_chans entries.
 * @lookup_phy_chans: Used to map interrupt number to physical channel. Points
 * to phy_chans entries.
 * @plat_data: Pointer to provided platform_data which is the driver
 * configuration.
 * @phy_res: Vector containing all physical channels.
 * @lcla_pool: lcla pool settings and data.
 * @lcpa_base: The virtual mapped address of LCPA.
 * @phy_lcpa: The physical address of the LCPA.
 * @lcpa_size: The size of the LCPA area.
 * @desc_slab: cache for descriptors.
 */
struct d40_base {
	spinlock_t			 interrupt_lock;
	spinlock_t			 execmd_lock;
	struct device			 *dev;
	void __iomem			 *virtbase;
	struct clk			 *clk;
	phys_addr_t			  phy_start;
	resource_size_t			  phy_size;
	int				  irq;
	int				  num_phy_chans;
	int				  num_log_chans;
	struct dma_device		  dma_both;
	struct dma_device		  dma_slave;
	struct dma_device		  dma_memcpy;
	struct d40_chan			 *phy_chans;
	struct d40_chan			 *log_chans;
	struct d40_chan			**lookup_log_chans;
	struct d40_chan			**lookup_phy_chans;
	struct stedma40_platform_data	 *plat_data;
	/* Physical half channels */
	struct d40_phy_res		 *phy_res;
	struct d40_lcla_pool		  lcla_pool;
	void				 *lcpa_base;
	dma_addr_t			  phy_lcpa;
	resource_size_t			  lcpa_size;
	struct kmem_cache		 *desc_slab;
};
2758d318a50SLinus Walleij 
/**
 * struct d40_interrupt_lookup - lookup table for interrupt handler
 *
 * @src: Interrupt mask register.
 * @clr: Interrupt clear register.
 * @is_error: true if this is an error interrupt.
 * @offset: start delta in the lookup_log_chans in d40_base. If equals to
 * D40_PHY_CHAN, the lookup_phy_chans shall be used instead.
 */
struct d40_interrupt_lookup {
	u32 src;
	u32 clr;
	bool is_error;
	int offset;
};
2918d318a50SLinus Walleij 
/**
 * struct d40_reg_val - simple register/value pair, used for tables of
 * initialization writes.
 *
 * @reg: The register.
 * @val: The value that belongs to the register in reg.
 */
struct d40_reg_val {
	unsigned int reg;
	unsigned int val;
};
3028d318a50SLinus Walleij 
3038d318a50SLinus Walleij static int d40_pool_lli_alloc(struct d40_desc *d40d,
3048d318a50SLinus Walleij 			      int lli_len, bool is_log)
3058d318a50SLinus Walleij {
3068d318a50SLinus Walleij 	u32 align;
3078d318a50SLinus Walleij 	void *base;
3088d318a50SLinus Walleij 
3098d318a50SLinus Walleij 	if (is_log)
3108d318a50SLinus Walleij 		align = sizeof(struct d40_log_lli);
3118d318a50SLinus Walleij 	else
3128d318a50SLinus Walleij 		align = sizeof(struct d40_phy_lli);
3138d318a50SLinus Walleij 
3148d318a50SLinus Walleij 	if (lli_len == 1) {
3158d318a50SLinus Walleij 		base = d40d->lli_pool.pre_alloc_lli;
3168d318a50SLinus Walleij 		d40d->lli_pool.size = sizeof(d40d->lli_pool.pre_alloc_lli);
3178d318a50SLinus Walleij 		d40d->lli_pool.base = NULL;
3188d318a50SLinus Walleij 	} else {
3198d318a50SLinus Walleij 		d40d->lli_pool.size = ALIGN(lli_len * 2 * align, align);
3208d318a50SLinus Walleij 
3218d318a50SLinus Walleij 		base = kmalloc(d40d->lli_pool.size + align, GFP_NOWAIT);
3228d318a50SLinus Walleij 		d40d->lli_pool.base = base;
3238d318a50SLinus Walleij 
3248d318a50SLinus Walleij 		if (d40d->lli_pool.base == NULL)
3258d318a50SLinus Walleij 			return -ENOMEM;
3268d318a50SLinus Walleij 	}
3278d318a50SLinus Walleij 
3288d318a50SLinus Walleij 	if (is_log) {
3298d318a50SLinus Walleij 		d40d->lli_log.src = PTR_ALIGN((struct d40_log_lli *) base,
3308d318a50SLinus Walleij 					      align);
3318d318a50SLinus Walleij 		d40d->lli_log.dst = PTR_ALIGN(d40d->lli_log.src + lli_len,
3328d318a50SLinus Walleij 					      align);
3338d318a50SLinus Walleij 	} else {
3348d318a50SLinus Walleij 		d40d->lli_phy.src = PTR_ALIGN((struct d40_phy_lli *)base,
3358d318a50SLinus Walleij 					      align);
3368d318a50SLinus Walleij 		d40d->lli_phy.dst = PTR_ALIGN(d40d->lli_phy.src + lli_len,
3378d318a50SLinus Walleij 					      align);
3388d318a50SLinus Walleij 
3398d318a50SLinus Walleij 		d40d->lli_phy.src_addr = virt_to_phys(d40d->lli_phy.src);
3408d318a50SLinus Walleij 		d40d->lli_phy.dst_addr = virt_to_phys(d40d->lli_phy.dst);
3418d318a50SLinus Walleij 	}
3428d318a50SLinus Walleij 
3438d318a50SLinus Walleij 	return 0;
3448d318a50SLinus Walleij }
3458d318a50SLinus Walleij 
3468d318a50SLinus Walleij static void d40_pool_lli_free(struct d40_desc *d40d)
3478d318a50SLinus Walleij {
3488d318a50SLinus Walleij 	kfree(d40d->lli_pool.base);
3498d318a50SLinus Walleij 	d40d->lli_pool.base = NULL;
3508d318a50SLinus Walleij 	d40d->lli_pool.size = 0;
3518d318a50SLinus Walleij 	d40d->lli_log.src = NULL;
3528d318a50SLinus Walleij 	d40d->lli_log.dst = NULL;
3538d318a50SLinus Walleij 	d40d->lli_phy.src = NULL;
3548d318a50SLinus Walleij 	d40d->lli_phy.dst = NULL;
3558d318a50SLinus Walleij 	d40d->lli_phy.src_addr = 0;
3568d318a50SLinus Walleij 	d40d->lli_phy.dst_addr = 0;
3578d318a50SLinus Walleij }
3588d318a50SLinus Walleij 
3598d318a50SLinus Walleij static dma_cookie_t d40_assign_cookie(struct d40_chan *d40c,
3608d318a50SLinus Walleij 				      struct d40_desc *desc)
3618d318a50SLinus Walleij {
3628d318a50SLinus Walleij 	dma_cookie_t cookie = d40c->chan.cookie;
3638d318a50SLinus Walleij 
3648d318a50SLinus Walleij 	if (++cookie < 0)
3658d318a50SLinus Walleij 		cookie = 1;
3668d318a50SLinus Walleij 
3678d318a50SLinus Walleij 	d40c->chan.cookie = cookie;
3688d318a50SLinus Walleij 	desc->txd.cookie = cookie;
3698d318a50SLinus Walleij 
3708d318a50SLinus Walleij 	return cookie;
3718d318a50SLinus Walleij }
3728d318a50SLinus Walleij 
/* Unlink a descriptor from whichever channel list it currently sits on. */
static void d40_desc_remove(struct d40_desc *d40d)
{
	list_del(&d40d->node);
}
3778d318a50SLinus Walleij 
3788d318a50SLinus Walleij static struct d40_desc *d40_desc_get(struct d40_chan *d40c)
3798d318a50SLinus Walleij {
3808d318a50SLinus Walleij 	struct d40_desc *d;
3818d318a50SLinus Walleij 	struct d40_desc *_d;
3828d318a50SLinus Walleij 
3838d318a50SLinus Walleij 	if (!list_empty(&d40c->client)) {
3848d318a50SLinus Walleij 		list_for_each_entry_safe(d, _d, &d40c->client, node)
3858d318a50SLinus Walleij 			if (async_tx_test_ack(&d->txd)) {
3868d318a50SLinus Walleij 				d40_pool_lli_free(d);
3878d318a50SLinus Walleij 				d40_desc_remove(d);
388c675b1b4SJonas Aaberg 				break;
3898d318a50SLinus Walleij 			}
3908d318a50SLinus Walleij 	} else {
391c675b1b4SJonas Aaberg 		d = kmem_cache_alloc(d40c->base->desc_slab, GFP_NOWAIT);
392c675b1b4SJonas Aaberg 		if (d != NULL) {
393c675b1b4SJonas Aaberg 			memset(d, 0, sizeof(struct d40_desc));
394c675b1b4SJonas Aaberg 			INIT_LIST_HEAD(&d->node);
3958d318a50SLinus Walleij 		}
396c675b1b4SJonas Aaberg 	}
397c675b1b4SJonas Aaberg 	return d;
3988d318a50SLinus Walleij }
3998d318a50SLinus Walleij 
/* Return a descriptor to the slab cache. Caller must have unlinked it. */
static void d40_desc_free(struct d40_chan *d40c, struct d40_desc *d40d)
{
	kmem_cache_free(d40c->base->desc_slab, d40d);
}
4048d318a50SLinus Walleij 
/* Append a descriptor to the tail of the channel's active list. */
static void d40_desc_submit(struct d40_chan *d40c, struct d40_desc *desc)
{
	list_add_tail(&desc->node, &d40c->active);
}
4098d318a50SLinus Walleij 
4108d318a50SLinus Walleij static struct d40_desc *d40_first_active_get(struct d40_chan *d40c)
4118d318a50SLinus Walleij {
4128d318a50SLinus Walleij 	struct d40_desc *d;
4138d318a50SLinus Walleij 
4148d318a50SLinus Walleij 	if (list_empty(&d40c->active))
4158d318a50SLinus Walleij 		return NULL;
4168d318a50SLinus Walleij 
4178d318a50SLinus Walleij 	d = list_first_entry(&d40c->active,
4188d318a50SLinus Walleij 			     struct d40_desc,
4198d318a50SLinus Walleij 			     node);
4208d318a50SLinus Walleij 	return d;
4218d318a50SLinus Walleij }
4228d318a50SLinus Walleij 
/* Append a descriptor to the tail of the channel's pending queue. */
static void d40_desc_queue(struct d40_chan *d40c, struct d40_desc *desc)
{
	list_add_tail(&desc->node, &d40c->queue);
}
4278d318a50SLinus Walleij 
4288d318a50SLinus Walleij static struct d40_desc *d40_first_queued(struct d40_chan *d40c)
4298d318a50SLinus Walleij {
4308d318a50SLinus Walleij 	struct d40_desc *d;
4318d318a50SLinus Walleij 
4328d318a50SLinus Walleij 	if (list_empty(&d40c->queue))
4338d318a50SLinus Walleij 		return NULL;
4348d318a50SLinus Walleij 
4358d318a50SLinus Walleij 	d = list_first_entry(&d40c->queue,
4368d318a50SLinus Walleij 			     struct d40_desc,
4378d318a50SLinus Walleij 			     node);
4388d318a50SLinus Walleij 	return d;
4398d318a50SLinus Walleij }
4408d318a50SLinus Walleij 
4418d318a50SLinus Walleij /* Support functions for logical channels */
4428d318a50SLinus Walleij 
443508849adSLinus Walleij static int d40_lcla_id_get(struct d40_chan *d40c)
4448d318a50SLinus Walleij {
4458d318a50SLinus Walleij 	int src_id = 0;
4468d318a50SLinus Walleij 	int dst_id = 0;
4478d318a50SLinus Walleij 	struct d40_log_lli *lcla_lidx_base =
448508849adSLinus Walleij 		d40c->base->lcla_pool.base + d40c->phy_chan->num * 1024;
4498d318a50SLinus Walleij 	int i;
4508d318a50SLinus Walleij 	int lli_per_log = d40c->base->plat_data->llis_per_log;
4512292b880SJonas Aaberg 	unsigned long flags;
4528d318a50SLinus Walleij 
4538d318a50SLinus Walleij 	if (d40c->lcla.src_id >= 0 && d40c->lcla.dst_id >= 0)
4548d318a50SLinus Walleij 		return 0;
4558d318a50SLinus Walleij 
456508849adSLinus Walleij 	if (d40c->base->lcla_pool.num_blocks > 32)
4578d318a50SLinus Walleij 		return -EINVAL;
4588d318a50SLinus Walleij 
459508849adSLinus Walleij 	spin_lock_irqsave(&d40c->base->lcla_pool.lock, flags);
4608d318a50SLinus Walleij 
461508849adSLinus Walleij 	for (i = 0; i < d40c->base->lcla_pool.num_blocks; i++) {
462508849adSLinus Walleij 		if (!(d40c->base->lcla_pool.alloc_map[d40c->phy_chan->num] &
463508849adSLinus Walleij 		      (0x1 << i))) {
464508849adSLinus Walleij 			d40c->base->lcla_pool.alloc_map[d40c->phy_chan->num] |=
465508849adSLinus Walleij 				(0x1 << i);
4668d318a50SLinus Walleij 			break;
4678d318a50SLinus Walleij 		}
4688d318a50SLinus Walleij 	}
4698d318a50SLinus Walleij 	src_id = i;
470508849adSLinus Walleij 	if (src_id >= d40c->base->lcla_pool.num_blocks)
4718d318a50SLinus Walleij 		goto err;
4728d318a50SLinus Walleij 
473508849adSLinus Walleij 	for (; i < d40c->base->lcla_pool.num_blocks; i++) {
474508849adSLinus Walleij 		if (!(d40c->base->lcla_pool.alloc_map[d40c->phy_chan->num] &
475508849adSLinus Walleij 		      (0x1 << i))) {
476508849adSLinus Walleij 			d40c->base->lcla_pool.alloc_map[d40c->phy_chan->num] |=
477508849adSLinus Walleij 				(0x1 << i);
4788d318a50SLinus Walleij 			break;
4798d318a50SLinus Walleij 		}
4808d318a50SLinus Walleij 	}
4818d318a50SLinus Walleij 
4828d318a50SLinus Walleij 	dst_id = i;
4838d318a50SLinus Walleij 	if (dst_id == src_id)
4848d318a50SLinus Walleij 		goto err;
4858d318a50SLinus Walleij 
4868d318a50SLinus Walleij 	d40c->lcla.src_id = src_id;
4878d318a50SLinus Walleij 	d40c->lcla.dst_id = dst_id;
4888d318a50SLinus Walleij 	d40c->lcla.dst = lcla_lidx_base + dst_id * lli_per_log + 1;
4898d318a50SLinus Walleij 	d40c->lcla.src = lcla_lidx_base + src_id * lli_per_log + 1;
4908d318a50SLinus Walleij 
491508849adSLinus Walleij 	spin_unlock_irqrestore(&d40c->base->lcla_pool.lock, flags);
4928d318a50SLinus Walleij 	return 0;
4938d318a50SLinus Walleij err:
494508849adSLinus Walleij 	spin_unlock_irqrestore(&d40c->base->lcla_pool.lock, flags);
4958d318a50SLinus Walleij 	return -EINVAL;
4968d318a50SLinus Walleij }
4978d318a50SLinus Walleij 
/*
 * Issue a STOP/RUN/SUSPEND command to the physical channel backing d40c.
 * Serialized under execmd_lock since several channels share the same
 * command register. For a SUSPEND request the function polls (up to
 * D40_SUSPEND_MAX_IT iterations) until the hardware reports STOP or
 * SUSPENDED. Returns 0 on success, -EBUSY if the channel would not
 * suspend in time.
 */
static int d40_channel_execute_command(struct d40_chan *d40c,
				       enum d40_command command)
{
	int status, i;
	void __iomem *active_reg;
	int ret = 0;
	unsigned long flags;
	u32 wmask;

	spin_lock_irqsave(&d40c->base->execmd_lock, flags);

	/* Even-numbered channels live in ACTIVE, odd ones in ACTIVO. */
	if (d40c->phy_chan->num % 2 == 0)
		active_reg = d40c->base->virtbase + D40_DREG_ACTIVE;
	else
		active_reg = d40c->base->virtbase + D40_DREG_ACTIVO;

	if (command == D40_DMA_SUSPEND_REQ) {
		/* Each channel occupies a 2-bit field in the register. */
		status = (readl(active_reg) &
			  D40_CHAN_POS_MASK(d40c->phy_chan->num)) >>
			D40_CHAN_POS(d40c->phy_chan->num);

		/* Already stopped or suspended - nothing to request. */
		if (status == D40_DMA_SUSPENDED || status == D40_DMA_STOP)
			goto done;
	}

	/*
	 * Other channels' 2-bit fields are written as all-ones.
	 * NOTE(review): presumably all-ones is a no-op for those fields
	 * on this register - confirm against the DMAC specification.
	 */
	wmask = 0xffffffff & ~(D40_CHAN_POS_MASK(d40c->phy_chan->num));
	writel(wmask | (command << D40_CHAN_POS(d40c->phy_chan->num)),
	       active_reg);

	if (command == D40_DMA_SUSPEND_REQ) {

		/* Poll until the channel reports stopped/suspended. */
		for (i = 0 ; i < D40_SUSPEND_MAX_IT; i++) {
			status = (readl(active_reg) &
				  D40_CHAN_POS_MASK(d40c->phy_chan->num)) >>
				D40_CHAN_POS(d40c->phy_chan->num);

			cpu_relax();
			/*
			 * Reduce the number of bus accesses while
			 * waiting for the DMA to suspend.
			 */
			udelay(3);

			if (status == D40_DMA_STOP ||
			    status == D40_DMA_SUSPENDED)
				break;
		}

		if (i == D40_SUSPEND_MAX_IT) {
			dev_err(&d40c->chan.dev->device,
				"[%s]: unable to suspend the chl %d (log: %d) status %x\n",
				__func__, d40c->phy_chan->num, d40c->log_num,
				status);
			dump_stack();
			ret = -EBUSY;
		}

	}
done:
	spin_unlock_irqrestore(&d40c->base->execmd_lock, flags);
	return ret;
}
5618d318a50SLinus Walleij 
/*
 * Abort everything on a channel: return all active and queued
 * descriptors to the free-list, release the channel's LCLA entries
 * and mark the channel idle.
 *
 * NOTE(review): no channel lock is taken here; appears to rely on the
 * caller serializing access to the descriptor lists -- confirm.
 */
static void d40_term_all(struct d40_chan *d40c)
{
	struct d40_desc *d40d;
	unsigned long flags;

	/* Release active descriptors */
	while ((d40d = d40_first_active_get(d40c))) {
		d40_desc_remove(d40d);

		/* Return desc to free-list */
		d40_desc_free(d40c, d40d);
	}

	/* Release queued descriptors waiting for transfer */
	while ((d40d = d40_first_queued(d40c))) {
		d40_desc_remove(d40d);

		/* Return desc to free-list */
		d40_desc_free(d40c, d40d);
	}

	spin_lock_irqsave(&d40c->base->lcla_pool.lock, flags);

	/* Clear this channel's src and dst bits in the LCLA alloc map */
	d40c->base->lcla_pool.alloc_map[d40c->phy_chan->num] &=
		(~(0x1 << d40c->lcla.dst_id));
	d40c->base->lcla_pool.alloc_map[d40c->phy_chan->num] &=
		(~(0x1 << d40c->lcla.src_id));

	/* -1 marks the LCLA ids as unallocated */
	d40c->lcla.src_id = -1;
	d40c->lcla.dst_id = -1;

	spin_unlock_irqrestore(&d40c->base->lcla_pool.lock, flags);

	d40c->pending_tx = 0;
	d40c->busy = false;
}
5988d318a50SLinus Walleij 
/*
 * Activate or deactivate the event line(s) of a channel in the
 * SSLNK (source) and/or SDLNK (destination) registers of its
 * physical channel, depending on the transfer direction.
 */
static void d40_config_set_event(struct d40_chan *d40c, bool do_enable)
{
	u32 val;
	unsigned long flags;

	/* Notice, that disable requires the physical channel to be stopped */
	if (do_enable)
		val = D40_ACTIVATE_EVENTLINE;
	else
		val = D40_DEACTIVATE_EVENTLINE;

	spin_lock_irqsave(&d40c->phy_chan->lock, flags);

	/* Enable event line connected to device (or memcpy) */
	if ((d40c->dma_cfg.dir ==  STEDMA40_PERIPH_TO_MEM) ||
	    (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_PERIPH)) {
		u32 event = D40_TYPE_TO_EVENT(d40c->dma_cfg.src_dev_type);

		/*
		 * Write the 2-bit value at this event's position and all
		 * ones at every other position.  NOTE(review): presumably
		 * the all-ones pattern leaves the other event lines
		 * untouched by the hardware -- confirm against the DMA40
		 * register spec.
		 */
		writel((val << D40_EVENTLINE_POS(event)) |
		       ~D40_EVENTLINE_MASK(event),
		       d40c->base->virtbase + D40_DREG_PCBASE +
		       d40c->phy_chan->num * D40_DREG_PCDELTA +
		       D40_CHAN_REG_SSLNK);
	}
	if (d40c->dma_cfg.dir !=  STEDMA40_PERIPH_TO_MEM) {
		u32 event = D40_TYPE_TO_EVENT(d40c->dma_cfg.dst_dev_type);

		/* Same write pattern on the destination link register */
		writel((val << D40_EVENTLINE_POS(event)) |
		       ~D40_EVENTLINE_MASK(event),
		       d40c->base->virtbase + D40_DREG_PCBASE +
		       d40c->phy_chan->num * D40_DREG_PCDELTA +
		       D40_CHAN_REG_SDLNK);
	}

	spin_unlock_irqrestore(&d40c->phy_chan->lock, flags);
}
6358d318a50SLinus Walleij 
636a5ebca47SJonas Aaberg static u32 d40_chan_has_events(struct d40_chan *d40c)
6378d318a50SLinus Walleij {
6388d318a50SLinus Walleij 	u32 val = 0;
6398d318a50SLinus Walleij 
6408d318a50SLinus Walleij 	/* If SSLNK or SDLNK is zero all events are disabled */
6418d318a50SLinus Walleij 	if ((d40c->dma_cfg.dir ==  STEDMA40_PERIPH_TO_MEM) ||
6428d318a50SLinus Walleij 	    (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_PERIPH))
6438d318a50SLinus Walleij 		val = readl(d40c->base->virtbase + D40_DREG_PCBASE +
6448d318a50SLinus Walleij 			    d40c->phy_chan->num * D40_DREG_PCDELTA +
6458d318a50SLinus Walleij 			    D40_CHAN_REG_SSLNK);
6468d318a50SLinus Walleij 
6478d318a50SLinus Walleij 	if (d40c->dma_cfg.dir !=  STEDMA40_PERIPH_TO_MEM)
6488d318a50SLinus Walleij 		val = readl(d40c->base->virtbase + D40_DREG_PCBASE +
6498d318a50SLinus Walleij 			    d40c->phy_chan->num * D40_DREG_PCDELTA +
6508d318a50SLinus Walleij 			    D40_CHAN_REG_SDLNK);
651a5ebca47SJonas Aaberg 	return val;
6528d318a50SLinus Walleij }
6538d318a50SLinus Walleij 
6548d318a50SLinus Walleij static void d40_config_enable_lidx(struct d40_chan *d40c)
6558d318a50SLinus Walleij {
6568d318a50SLinus Walleij 	/* Set LIDX for lcla */
6578d318a50SLinus Walleij 	writel((d40c->phy_chan->num << D40_SREG_ELEM_LOG_LIDX_POS) &
6588d318a50SLinus Walleij 	       D40_SREG_ELEM_LOG_LIDX_MASK,
6598d318a50SLinus Walleij 	       d40c->base->virtbase + D40_DREG_PCBASE +
6608d318a50SLinus Walleij 	       d40c->phy_chan->num * D40_DREG_PCDELTA + D40_CHAN_REG_SDELT);
6618d318a50SLinus Walleij 
6628d318a50SLinus Walleij 	writel((d40c->phy_chan->num << D40_SREG_ELEM_LOG_LIDX_POS) &
6638d318a50SLinus Walleij 	       D40_SREG_ELEM_LOG_LIDX_MASK,
6648d318a50SLinus Walleij 	       d40c->base->virtbase + D40_DREG_PCBASE +
6658d318a50SLinus Walleij 	       d40c->phy_chan->num * D40_DREG_PCDELTA + D40_CHAN_REG_SSELT);
6668d318a50SLinus Walleij }
6678d318a50SLinus Walleij 
/*
 * Suspend the channel and write its static configuration to hardware:
 * channel mode (logical/physical) and operation mode option in the
 * PRMSE/PRMOE register pair, and for logical channels the default
 * src/dst config registers plus the LIDX element fields.
 *
 * Returns 0 on success, or the error from the suspend command.
 */
static int d40_config_write(struct d40_chan *d40c)
{
	u32 addr_base;
	u32 var;
	int res;

	/* The channel must be suspended before it can be reconfigured */
	res = d40_channel_execute_command(d40c, D40_DMA_SUSPEND_REQ);
	if (res)
		return res;

	/* Odd addresses are even addresses + 4 */
	addr_base = (d40c->phy_chan->num % 2) * 4;
	/* Setup channel mode to logical or physical */
	var = ((u32)(d40c->log_num != D40_PHY_CHAN) + 1) <<
		D40_CHAN_POS(d40c->phy_chan->num);
	/*
	 * NOTE(review): plain writel of a single channel's 2-bit field;
	 * presumably a zero value at the other positions is a no-op in
	 * hardware -- confirm against the DMA40 spec.
	 */
	writel(var, d40c->base->virtbase + D40_DREG_PRMSE + addr_base);

	/* Setup operational mode option register */
	var = ((d40c->dma_cfg.channel_type >> STEDMA40_INFO_CH_MODE_OPT_POS) &
	       0x3) << D40_CHAN_POS(d40c->phy_chan->num);

	writel(var, d40c->base->virtbase + D40_DREG_PRMOE + addr_base);

	if (d40c->log_num != D40_PHY_CHAN) {
		/* Set default config for CFG reg */
		writel(d40c->src_def_cfg,
		       d40c->base->virtbase + D40_DREG_PCBASE +
		       d40c->phy_chan->num * D40_DREG_PCDELTA +
		       D40_CHAN_REG_SSCFG);
		writel(d40c->dst_def_cfg,
		       d40c->base->virtbase + D40_DREG_PCBASE +
		       d40c->phy_chan->num * D40_DREG_PCDELTA +
		       D40_CHAN_REG_SDCFG);

		d40_config_enable_lidx(d40c);
	}
	return res;
}
7068d318a50SLinus Walleij 
/*
 * Hand the next portion of a descriptor's LLI chain to the hardware.
 *
 * Physical transfers are written via d40_phy_lli_write.  Logical
 * transfers write links starting at the lli_count offset (at most
 * llis_per_log per call); lli_count is then advanced by lli_tx_len so
 * a later call from the TC interrupt continues where this one stopped.
 */
static void d40_desc_load(struct d40_chan *d40c, struct d40_desc *d40d)
{
	if (d40d->lli_phy.dst && d40d->lli_phy.src) {
		d40_phy_lli_write(d40c->base->virtbase,
				  d40c->phy_chan->num,
				  d40d->lli_phy.dst,
				  d40d->lli_phy.src);
	} else if (d40d->lli_log.dst && d40d->lli_log.src) {
		struct d40_log_lli *src = d40d->lli_log.src;
		struct d40_log_lli *dst = d40d->lli_log.dst;
		int s;

		/* Skip the links that have already been transferred */
		src += d40d->lli_count;
		dst += d40d->lli_count;
		s = d40_log_lli_write(d40c->lcpa,
				      d40c->lcla.src, d40c->lcla.dst,
				      dst, src,
				      d40c->base->plat_data->llis_per_log);

		/* If s equals to zero, the job is not linked */
		if (s > 0) {
			/*
			 * Flush the freshly written LCLA links to memory
			 * (DMA_TO_DEVICE) so the DMAC sees them; the
			 * mapping handle itself is intentionally dropped.
			 */
			(void) dma_map_single(d40c->base->dev, d40c->lcla.src,
					      s * sizeof(struct d40_log_lli),
					      DMA_TO_DEVICE);
			(void) dma_map_single(d40c->base->dev, d40c->lcla.dst,
					      s * sizeof(struct d40_log_lli),
					      DMA_TO_DEVICE);
		}
	}
	/* Record how far into the descriptor we have come */
	d40d->lli_count += d40d->lli_tx_len;
}
7388d318a50SLinus Walleij 
7398d318a50SLinus Walleij static dma_cookie_t d40_tx_submit(struct dma_async_tx_descriptor *tx)
7408d318a50SLinus Walleij {
7418d318a50SLinus Walleij 	struct d40_chan *d40c = container_of(tx->chan,
7428d318a50SLinus Walleij 					     struct d40_chan,
7438d318a50SLinus Walleij 					     chan);
7448d318a50SLinus Walleij 	struct d40_desc *d40d = container_of(tx, struct d40_desc, txd);
7458d318a50SLinus Walleij 	unsigned long flags;
7468d318a50SLinus Walleij 
7478d318a50SLinus Walleij 	spin_lock_irqsave(&d40c->lock, flags);
7488d318a50SLinus Walleij 
7498d318a50SLinus Walleij 	tx->cookie = d40_assign_cookie(d40c, d40d);
7508d318a50SLinus Walleij 
7518d318a50SLinus Walleij 	d40_desc_queue(d40c, d40d);
7528d318a50SLinus Walleij 
7538d318a50SLinus Walleij 	spin_unlock_irqrestore(&d40c->lock, flags);
7548d318a50SLinus Walleij 
7558d318a50SLinus Walleij 	return tx->cookie;
7568d318a50SLinus Walleij }
7578d318a50SLinus Walleij 
7588d318a50SLinus Walleij static int d40_start(struct d40_chan *d40c)
7598d318a50SLinus Walleij {
7600c32269dSJonas Aaberg 	if (d40c->log_num != D40_PHY_CHAN)
7618d318a50SLinus Walleij 		d40_config_set_event(d40c, true);
7628d318a50SLinus Walleij 
7630c32269dSJonas Aaberg 	return d40_channel_execute_command(d40c, D40_DMA_RUN);
7648d318a50SLinus Walleij }
7658d318a50SLinus Walleij 
7668d318a50SLinus Walleij static struct d40_desc *d40_queue_start(struct d40_chan *d40c)
7678d318a50SLinus Walleij {
7688d318a50SLinus Walleij 	struct d40_desc *d40d;
7698d318a50SLinus Walleij 	int err;
7708d318a50SLinus Walleij 
7718d318a50SLinus Walleij 	/* Start queued jobs, if any */
7728d318a50SLinus Walleij 	d40d = d40_first_queued(d40c);
7738d318a50SLinus Walleij 
7748d318a50SLinus Walleij 	if (d40d != NULL) {
7758d318a50SLinus Walleij 		d40c->busy = true;
7768d318a50SLinus Walleij 
7778d318a50SLinus Walleij 		/* Remove from queue */
7788d318a50SLinus Walleij 		d40_desc_remove(d40d);
7798d318a50SLinus Walleij 
7808d318a50SLinus Walleij 		/* Add to active queue */
7818d318a50SLinus Walleij 		d40_desc_submit(d40c, d40d);
7828d318a50SLinus Walleij 
7838d318a50SLinus Walleij 		/* Initiate DMA job */
7848d318a50SLinus Walleij 		d40_desc_load(d40c, d40d);
7858d318a50SLinus Walleij 
7868d318a50SLinus Walleij 		/* Start dma job */
7878d318a50SLinus Walleij 		err = d40_start(d40c);
7888d318a50SLinus Walleij 
7898d318a50SLinus Walleij 		if (err)
7908d318a50SLinus Walleij 			return NULL;
7918d318a50SLinus Walleij 	}
7928d318a50SLinus Walleij 
7938d318a50SLinus Walleij 	return d40d;
7948d318a50SLinus Walleij }
7958d318a50SLinus Walleij 
/* called from interrupt context */
/*
 * Terminal count handling for a channel: either continue a partially
 * transferred descriptor, or start the next queued job and schedule
 * the tasklet to report the finished one to the client.
 */
static void dma_tc_handle(struct d40_chan *d40c)
{
	struct d40_desc *d40d;

	/* Channel not (or no longer) bound to a physical channel */
	if (!d40c->phy_chan)
		return;

	/* Get first active entry from list */
	d40d = d40_first_active_get(d40c);

	if (d40d == NULL)
		return;

	/* More links left in this descriptor? Load and restart. */
	if (d40d->lli_count < d40d->lli_len) {

		d40_desc_load(d40c, d40d);
		/* Start dma job */
		(void) d40_start(d40c);
		return;
	}

	/* Descriptor complete: kick the next queued job, if any */
	if (d40_queue_start(d40c) == NULL)
		d40c->busy = false;

	/* Defer the client notification to the tasklet */
	d40c->pending_tx++;
	tasklet_schedule(&d40c->tasklet);

}
8258d318a50SLinus Walleij 
/*
 * Completion tasklet: retires the first active descriptor, invokes
 * the client callback outside the channel lock, and reschedules
 * itself while more completions (pending_tx) remain.
 */
static void dma_tasklet(unsigned long data)
{
	struct d40_chan *d40c = (struct d40_chan *) data;
	struct d40_desc *d40d_fin;
	unsigned long flags;
	dma_async_tx_callback callback;
	void *callback_param;

	spin_lock_irqsave(&d40c->lock, flags);

	/* Get first active entry from list */
	d40d_fin = d40_first_active_get(d40c);

	if (d40d_fin == NULL)
		goto err;

	/* Advance the channel's last-completed cookie */
	d40c->completed = d40d_fin->txd.cookie;

	/*
	 * If terminating a channel pending_tx is set to zero.
	 * This prevents any finished active jobs to return to the client.
	 */
	if (d40c->pending_tx == 0) {
		spin_unlock_irqrestore(&d40c->lock, flags);
		return;
	}

	/* Callback to client */
	callback = d40d_fin->txd.callback;
	callback_param = d40d_fin->txd.callback_param;

	if (async_tx_test_ack(&d40d_fin->txd)) {
		/* Client has acked: recycle the descriptor now */
		d40_pool_lli_free(d40d_fin);
		d40_desc_remove(d40d_fin);
		/* Return desc to free-list */
		d40_desc_free(d40c, d40d_fin);
	} else {
		/* Not acked yet: park it on the client list until freed */
		if (!d40d_fin->is_in_client_list) {
			d40_desc_remove(d40d_fin);
			list_add_tail(&d40d_fin->node, &d40c->client);
			d40d_fin->is_in_client_list = true;
		}
	}

	d40c->pending_tx--;

	/* More completions pending: run again for the next descriptor */
	if (d40c->pending_tx)
		tasklet_schedule(&d40c->tasklet);

	spin_unlock_irqrestore(&d40c->lock, flags);

	/* Call the client callback with the channel lock released */
	if (callback)
		callback(callback_param);

	return;

 err:
	/* Rescue manouver if receiving double interrupts */
	if (d40c->pending_tx > 0)
		d40c->pending_tx--;
	spin_unlock_irqrestore(&d40c->lock, flags);
}
8888d318a50SLinus Walleij 
/*
 * Interrupt handler.  Each il[] entry describes one 32 bit status
 * register (logical/physical, TC/error) together with its clear
 * register and the base channel number of the 32 channels it covers
 * (offset == D40_PHY_CHAN marks the physical-channel registers).
 * All status registers are snapshot into regs[], which is then
 * scanned as one contiguous bitmap.
 */
static irqreturn_t d40_handle_interrupt(int irq, void *data)
{
	static const struct d40_interrupt_lookup il[] = {
		{D40_DREG_LCTIS0, D40_DREG_LCICR0, false,  0},
		{D40_DREG_LCTIS1, D40_DREG_LCICR1, false, 32},
		{D40_DREG_LCTIS2, D40_DREG_LCICR2, false, 64},
		{D40_DREG_LCTIS3, D40_DREG_LCICR3, false, 96},
		{D40_DREG_LCEIS0, D40_DREG_LCICR0, true,   0},
		{D40_DREG_LCEIS1, D40_DREG_LCICR1, true,  32},
		{D40_DREG_LCEIS2, D40_DREG_LCICR2, true,  64},
		{D40_DREG_LCEIS3, D40_DREG_LCICR3, true,  96},
		{D40_DREG_PCTIS,  D40_DREG_PCICR,  false, D40_PHY_CHAN},
		{D40_DREG_PCEIS,  D40_DREG_PCICR,  true,  D40_PHY_CHAN},
	};

	int i;
	u32 regs[ARRAY_SIZE(il)];
	u32 tmp;
	u32 idx;
	u32 row;
	long chan = -1;
	struct d40_chan *d40c;
	unsigned long flags;
	struct d40_base *base = data;

	spin_lock_irqsave(&base->interrupt_lock, flags);

	/* Read interrupt status of both logical and physical channels */
	for (i = 0; i < ARRAY_SIZE(il); i++)
		regs[i] = readl(base->virtbase + il[i].src);

	for (;;) {

		/* Next pending bit across the whole regs[] snapshot */
		chan = find_next_bit((unsigned long *)regs,
				     BITS_PER_LONG * ARRAY_SIZE(il), chan + 1);

		/* No more set bits found? */
		if (chan == BITS_PER_LONG * ARRAY_SIZE(il))
			break;

		/* Map the flat bit index back to (register row, bit) */
		row = chan / BITS_PER_LONG;
		idx = chan & (BITS_PER_LONG - 1);

		/* ACK interrupt */
		tmp = readl(base->virtbase + il[row].clr);
		tmp |= 1 << idx;
		writel(tmp, base->virtbase + il[row].clr);

		/* Resolve the channel the bit belongs to */
		if (il[row].offset == D40_PHY_CHAN)
			d40c = base->lookup_phy_chans[idx];
		else
			d40c = base->lookup_log_chans[il[row].offset + idx];
		spin_lock(&d40c->lock);

		/* TC interrupts are handled; error interrupts only logged */
		if (!il[row].is_error)
			dma_tc_handle(d40c);
		else
			dev_err(base->dev,
				"[%s] IRQ chan: %ld offset %d idx %d\n",
				__func__, chan, il[row].offset, idx);

		spin_unlock(&d40c->lock);
	}

	spin_unlock_irqrestore(&base->interrupt_lock, flags);

	return IRQ_HANDLED;
}
9578d318a50SLinus Walleij 
9588d318a50SLinus Walleij 
9598d318a50SLinus Walleij static int d40_validate_conf(struct d40_chan *d40c,
9608d318a50SLinus Walleij 			     struct stedma40_chan_cfg *conf)
9618d318a50SLinus Walleij {
9628d318a50SLinus Walleij 	int res = 0;
9638d318a50SLinus Walleij 	u32 dst_event_group = D40_TYPE_TO_GROUP(conf->dst_dev_type);
9648d318a50SLinus Walleij 	u32 src_event_group = D40_TYPE_TO_GROUP(conf->src_dev_type);
9658d318a50SLinus Walleij 	bool is_log = (conf->channel_type & STEDMA40_CHANNEL_IN_OPER_MODE)
9668d318a50SLinus Walleij 		== STEDMA40_CHANNEL_IN_LOG_MODE;
9678d318a50SLinus Walleij 
9688d318a50SLinus Walleij 	if (d40c->dma_cfg.dir == STEDMA40_MEM_TO_PERIPH &&
9698d318a50SLinus Walleij 	    dst_event_group == STEDMA40_DEV_DST_MEMORY) {
9708d318a50SLinus Walleij 		dev_err(&d40c->chan.dev->device, "[%s] Invalid dst\n",
9718d318a50SLinus Walleij 			__func__);
9728d318a50SLinus Walleij 		res = -EINVAL;
9738d318a50SLinus Walleij 	}
9748d318a50SLinus Walleij 
9758d318a50SLinus Walleij 	if (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_MEM &&
9768d318a50SLinus Walleij 	    src_event_group == STEDMA40_DEV_SRC_MEMORY) {
9778d318a50SLinus Walleij 		dev_err(&d40c->chan.dev->device, "[%s] Invalid src\n",
9788d318a50SLinus Walleij 			__func__);
9798d318a50SLinus Walleij 		res = -EINVAL;
9808d318a50SLinus Walleij 	}
9818d318a50SLinus Walleij 
9828d318a50SLinus Walleij 	if (src_event_group == STEDMA40_DEV_SRC_MEMORY &&
9838d318a50SLinus Walleij 	    dst_event_group == STEDMA40_DEV_DST_MEMORY && is_log) {
9848d318a50SLinus Walleij 		dev_err(&d40c->chan.dev->device,
9858d318a50SLinus Walleij 			"[%s] No event line\n", __func__);
9868d318a50SLinus Walleij 		res = -EINVAL;
9878d318a50SLinus Walleij 	}
9888d318a50SLinus Walleij 
9898d318a50SLinus Walleij 	if (conf->dir == STEDMA40_PERIPH_TO_PERIPH &&
9908d318a50SLinus Walleij 	    (src_event_group != dst_event_group)) {
9918d318a50SLinus Walleij 		dev_err(&d40c->chan.dev->device,
9928d318a50SLinus Walleij 			"[%s] Invalid event group\n", __func__);
9938d318a50SLinus Walleij 		res = -EINVAL;
9948d318a50SLinus Walleij 	}
9958d318a50SLinus Walleij 
9968d318a50SLinus Walleij 	if (conf->dir == STEDMA40_PERIPH_TO_PERIPH) {
9978d318a50SLinus Walleij 		/*
9988d318a50SLinus Walleij 		 * DMAC HW supports it. Will be added to this driver,
9998d318a50SLinus Walleij 		 * in case any dma client requires it.
10008d318a50SLinus Walleij 		 */
10018d318a50SLinus Walleij 		dev_err(&d40c->chan.dev->device,
10028d318a50SLinus Walleij 			"[%s] periph to periph not supported\n",
10038d318a50SLinus Walleij 			__func__);
10048d318a50SLinus Walleij 		res = -EINVAL;
10058d318a50SLinus Walleij 	}
10068d318a50SLinus Walleij 
10078d318a50SLinus Walleij 	return res;
10088d318a50SLinus Walleij }
10098d318a50SLinus Walleij 
10108d318a50SLinus Walleij static bool d40_alloc_mask_set(struct d40_phy_res *phy, bool is_src,
10114aed79b2SMarcin Mielczarczyk 			       int log_event_line, bool is_log)
10128d318a50SLinus Walleij {
10138d318a50SLinus Walleij 	unsigned long flags;
10148d318a50SLinus Walleij 	spin_lock_irqsave(&phy->lock, flags);
10154aed79b2SMarcin Mielczarczyk 	if (!is_log) {
10168d318a50SLinus Walleij 		/* Physical interrupts are masked per physical full channel */
10178d318a50SLinus Walleij 		if (phy->allocated_src == D40_ALLOC_FREE &&
10188d318a50SLinus Walleij 		    phy->allocated_dst == D40_ALLOC_FREE) {
10198d318a50SLinus Walleij 			phy->allocated_dst = D40_ALLOC_PHY;
10208d318a50SLinus Walleij 			phy->allocated_src = D40_ALLOC_PHY;
10218d318a50SLinus Walleij 			goto found;
10228d318a50SLinus Walleij 		} else
10238d318a50SLinus Walleij 			goto not_found;
10248d318a50SLinus Walleij 	}
10258d318a50SLinus Walleij 
10268d318a50SLinus Walleij 	/* Logical channel */
10278d318a50SLinus Walleij 	if (is_src) {
10288d318a50SLinus Walleij 		if (phy->allocated_src == D40_ALLOC_PHY)
10298d318a50SLinus Walleij 			goto not_found;
10308d318a50SLinus Walleij 
10318d318a50SLinus Walleij 		if (phy->allocated_src == D40_ALLOC_FREE)
10328d318a50SLinus Walleij 			phy->allocated_src = D40_ALLOC_LOG_FREE;
10338d318a50SLinus Walleij 
10348d318a50SLinus Walleij 		if (!(phy->allocated_src & (1 << log_event_line))) {
10358d318a50SLinus Walleij 			phy->allocated_src |= 1 << log_event_line;
10368d318a50SLinus Walleij 			goto found;
10378d318a50SLinus Walleij 		} else
10388d318a50SLinus Walleij 			goto not_found;
10398d318a50SLinus Walleij 	} else {
10408d318a50SLinus Walleij 		if (phy->allocated_dst == D40_ALLOC_PHY)
10418d318a50SLinus Walleij 			goto not_found;
10428d318a50SLinus Walleij 
10438d318a50SLinus Walleij 		if (phy->allocated_dst == D40_ALLOC_FREE)
10448d318a50SLinus Walleij 			phy->allocated_dst = D40_ALLOC_LOG_FREE;
10458d318a50SLinus Walleij 
10468d318a50SLinus Walleij 		if (!(phy->allocated_dst & (1 << log_event_line))) {
10478d318a50SLinus Walleij 			phy->allocated_dst |= 1 << log_event_line;
10488d318a50SLinus Walleij 			goto found;
10498d318a50SLinus Walleij 		} else
10508d318a50SLinus Walleij 			goto not_found;
10518d318a50SLinus Walleij 	}
10528d318a50SLinus Walleij 
10538d318a50SLinus Walleij not_found:
10548d318a50SLinus Walleij 	spin_unlock_irqrestore(&phy->lock, flags);
10558d318a50SLinus Walleij 	return false;
10568d318a50SLinus Walleij found:
10578d318a50SLinus Walleij 	spin_unlock_irqrestore(&phy->lock, flags);
10588d318a50SLinus Walleij 	return true;
10598d318a50SLinus Walleij }
10608d318a50SLinus Walleij 
10618d318a50SLinus Walleij static bool d40_alloc_mask_free(struct d40_phy_res *phy, bool is_src,
10628d318a50SLinus Walleij 			       int log_event_line)
10638d318a50SLinus Walleij {
10648d318a50SLinus Walleij 	unsigned long flags;
10658d318a50SLinus Walleij 	bool is_free = false;
10668d318a50SLinus Walleij 
10678d318a50SLinus Walleij 	spin_lock_irqsave(&phy->lock, flags);
10688d318a50SLinus Walleij 	if (!log_event_line) {
10698d318a50SLinus Walleij 		/* Physical interrupts are masked per physical full channel */
10708d318a50SLinus Walleij 		phy->allocated_dst = D40_ALLOC_FREE;
10718d318a50SLinus Walleij 		phy->allocated_src = D40_ALLOC_FREE;
10728d318a50SLinus Walleij 		is_free = true;
10738d318a50SLinus Walleij 		goto out;
10748d318a50SLinus Walleij 	}
10758d318a50SLinus Walleij 
10768d318a50SLinus Walleij 	/* Logical channel */
10778d318a50SLinus Walleij 	if (is_src) {
10788d318a50SLinus Walleij 		phy->allocated_src &= ~(1 << log_event_line);
10798d318a50SLinus Walleij 		if (phy->allocated_src == D40_ALLOC_LOG_FREE)
10808d318a50SLinus Walleij 			phy->allocated_src = D40_ALLOC_FREE;
10818d318a50SLinus Walleij 	} else {
10828d318a50SLinus Walleij 		phy->allocated_dst &= ~(1 << log_event_line);
10838d318a50SLinus Walleij 		if (phy->allocated_dst == D40_ALLOC_LOG_FREE)
10848d318a50SLinus Walleij 			phy->allocated_dst = D40_ALLOC_FREE;
10858d318a50SLinus Walleij 	}
10868d318a50SLinus Walleij 
10878d318a50SLinus Walleij 	is_free = ((phy->allocated_src | phy->allocated_dst) ==
10888d318a50SLinus Walleij 		   D40_ALLOC_FREE);
10898d318a50SLinus Walleij 
10908d318a50SLinus Walleij out:
10918d318a50SLinus Walleij 	spin_unlock_irqrestore(&phy->lock, flags);
10928d318a50SLinus Walleij 
10938d318a50SLinus Walleij 	return is_free;
10948d318a50SLinus Walleij }
10958d318a50SLinus Walleij 
/*
 * Bind the channel to a physical channel (and, for logical mode, an
 * event line).  Physical memcpy takes the first fully free physical
 * channel; anything event-based searches only the two physical
 * channels belonging to the device's event group, stepping in strides
 * of 8 to spread load.  On success the lookup tables are updated.
 *
 * Returns 0 on success, -EINVAL when nothing could be allocated.
 */
static int d40_allocate_channel(struct d40_chan *d40c)
{
	int dev_type;
	int event_group;
	int event_line;
	struct d40_phy_res *phys;
	int i;
	int j;
	int log_num;
	bool is_src;
	bool is_log = (d40c->dma_cfg.channel_type &
		       STEDMA40_CHANNEL_IN_OPER_MODE)
		== STEDMA40_CHANNEL_IN_LOG_MODE;


	phys = d40c->base->phy_res;

	/* Derive dev_type/log_num from the transfer direction */
	if (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_MEM) {
		dev_type = d40c->dma_cfg.src_dev_type;
		log_num = 2 * dev_type;
		is_src = true;
	} else if (d40c->dma_cfg.dir == STEDMA40_MEM_TO_PERIPH ||
		   d40c->dma_cfg.dir == STEDMA40_MEM_TO_MEM) {
		/* dst event lines are used for logical memcpy */
		dev_type = d40c->dma_cfg.dst_dev_type;
		log_num = 2 * dev_type + 1;
		is_src = false;
	} else
		return -EINVAL;

	event_group = D40_TYPE_TO_GROUP(dev_type);
	event_line = D40_TYPE_TO_EVENT(dev_type);

	if (!is_log) {
		if (d40c->dma_cfg.dir == STEDMA40_MEM_TO_MEM) {
			/* Find physical half channel */
			for (i = 0; i < d40c->base->num_phy_chans; i++) {

				if (d40_alloc_mask_set(&phys[i], is_src,
						       0, is_log))
					goto found_phy;
			}
		} else
			/* Physical, event-based: search the event group */
			for (j = 0; j < d40c->base->num_phy_chans; j += 8) {
				int phy_num = j  + event_group * 2;
				for (i = phy_num; i < phy_num + 2; i++) {
					if (d40_alloc_mask_set(&phys[i],
							       is_src,
							       0,
							       is_log))
						goto found_phy;
				}
			}
		return -EINVAL;
found_phy:
		d40c->phy_chan = &phys[i];
		d40c->log_num = D40_PHY_CHAN;
		goto out;
	}
	if (dev_type == -1)
		return -EINVAL;

	/* Find logical channel */
	for (j = 0; j < d40c->base->num_phy_chans; j += 8) {
		int phy_num = j + event_group * 2;
		/*
		 * Spread logical channels across all available physical rather
		 * than pack every logical channel at the first available phy
		 * channels.
		 */
		if (is_src) {
			for (i = phy_num; i < phy_num + 2; i++) {
				if (d40_alloc_mask_set(&phys[i], is_src,
						       event_line, is_log))
					goto found_log;
			}
		} else {
			/* dst side scans the pair in the opposite order */
			for (i = phy_num + 1; i >= phy_num; i--) {
				if (d40_alloc_mask_set(&phys[i], is_src,
						       event_line, is_log))
					goto found_log;
			}
		}
	}
	return -EINVAL;

found_log:
	d40c->phy_chan = &phys[i];
	d40c->log_num = log_num;
out:

	/* Register the channel in the appropriate lookup table */
	if (is_log)
		d40c->base->lookup_log_chans[d40c->log_num] = d40c;
	else
		d40c->base->lookup_phy_chans[d40c->phy_chan->num] = d40c;

	return 0;

}
11958d318a50SLinus Walleij 
11968d318a50SLinus Walleij static int d40_config_memcpy(struct d40_chan *d40c)
11978d318a50SLinus Walleij {
11988d318a50SLinus Walleij 	dma_cap_mask_t cap = d40c->chan.device->cap_mask;
11998d318a50SLinus Walleij 
12008d318a50SLinus Walleij 	if (dma_has_cap(DMA_MEMCPY, cap) && !dma_has_cap(DMA_SLAVE, cap)) {
12018d318a50SLinus Walleij 		d40c->dma_cfg = *d40c->base->plat_data->memcpy_conf_log;
12028d318a50SLinus Walleij 		d40c->dma_cfg.src_dev_type = STEDMA40_DEV_SRC_MEMORY;
12038d318a50SLinus Walleij 		d40c->dma_cfg.dst_dev_type = d40c->base->plat_data->
12048d318a50SLinus Walleij 			memcpy[d40c->chan.chan_id];
12058d318a50SLinus Walleij 
12068d318a50SLinus Walleij 	} else if (dma_has_cap(DMA_MEMCPY, cap) &&
12078d318a50SLinus Walleij 		   dma_has_cap(DMA_SLAVE, cap)) {
12088d318a50SLinus Walleij 		d40c->dma_cfg = *d40c->base->plat_data->memcpy_conf_phy;
12098d318a50SLinus Walleij 	} else {
12108d318a50SLinus Walleij 		dev_err(&d40c->chan.dev->device, "[%s] No memcpy\n",
12118d318a50SLinus Walleij 			__func__);
12128d318a50SLinus Walleij 		return -EINVAL;
12138d318a50SLinus Walleij 	}
12148d318a50SLinus Walleij 
12158d318a50SLinus Walleij 	return 0;
12168d318a50SLinus Walleij }
12178d318a50SLinus Walleij 
12188d318a50SLinus Walleij 
/*
 * Release everything a channel holds: terminate queued/active
 * transfers, free client-owned descriptors, release the event line
 * (logical channels) and finally stop and free the physical channel.
 *
 * Returns 0 on success or a negative errno. Expected to be called
 * with d40c->lock held (callers in this file hold it).
 */
static int d40_free_dma(struct d40_chan *d40c)
{

	int res = 0;
	u32 event;
	struct d40_phy_res *phy = d40c->phy_chan;
	bool is_src;
	struct d40_desc *d;
	struct d40_desc *_d;


	/* Terminate all queued and active transfers */
	d40_term_all(d40c);

	/* Release client owned descriptors */
	if (!list_empty(&d40c->client))
		list_for_each_entry_safe(d, _d, &d40c->client, node) {
			d40_pool_lli_free(d);
			d40_desc_remove(d);
			/* Return desc to free-list */
			d40_desc_free(d40c, d);
		}

	if (phy == NULL) {
		dev_err(&d40c->chan.dev->device, "[%s] phy == null\n",
			__func__);
		return -EINVAL;
	}

	/* Both halves free means nobody allocated this channel. */
	if (phy->allocated_src == D40_ALLOC_FREE &&
	    phy->allocated_dst == D40_ALLOC_FREE) {
		dev_err(&d40c->chan.dev->device, "[%s] channel already free\n",
			__func__);
		return -EINVAL;
	}

	/* Map transfer direction to the event line that must be released. */
	if (d40c->dma_cfg.dir == STEDMA40_MEM_TO_PERIPH ||
	    d40c->dma_cfg.dir == STEDMA40_MEM_TO_MEM) {
		event = D40_TYPE_TO_EVENT(d40c->dma_cfg.dst_dev_type);
		is_src = false;
	} else if (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_MEM) {
		event = D40_TYPE_TO_EVENT(d40c->dma_cfg.src_dev_type);
		is_src = true;
	} else {
		dev_err(&d40c->chan.dev->device,
			"[%s] Unknown direction\n", __func__);
		return -EINVAL;
	}

	/* Suspend the channel before touching event/allocation state. */
	res = d40_channel_execute_command(d40c, D40_DMA_SUSPEND_REQ);
	if (res) {
		dev_err(&d40c->chan.dev->device, "[%s] suspend failed\n",
			__func__);
		return res;
	}

	if (d40c->log_num != D40_PHY_CHAN) {
		/* Release logical channel, deactivate the event line */

		d40_config_set_event(d40c, false);
		d40c->base->lookup_log_chans[d40c->log_num] = NULL;

		/*
		 * Check if there are more logical allocation
		 * on this phy channel.
		 */
		if (!d40_alloc_mask_free(phy, is_src, event)) {
			/* Resume the other logical channels if any */
			if (d40_chan_has_events(d40c)) {
				res = d40_channel_execute_command(d40c,
								  D40_DMA_RUN);
				if (res) {
					dev_err(&d40c->chan.dev->device,
						"[%s] Executing RUN command\n",
						__func__);
					return res;
				}
			}
			/* Other logical users remain: keep the phy channel. */
			return 0;
		}
	} else {
		/* Physical channel: just drop the allocation mask bit. */
		(void) d40_alloc_mask_free(phy, is_src, 0);
	}

	/* Release physical channel */
	res = d40_channel_execute_command(d40c, D40_DMA_STOP);
	if (res) {
		dev_err(&d40c->chan.dev->device,
			"[%s] Failed to stop channel\n", __func__);
		return res;
	}
	d40c->phy_chan = NULL;
	/* Invalidate channel type */
	d40c->dma_cfg.channel_type = 0;
	d40c->base->lookup_phy_chans[phy->num] = NULL;

	return 0;
}
13178d318a50SLinus Walleij 
13188d318a50SLinus Walleij static int d40_pause(struct dma_chan *chan)
13198d318a50SLinus Walleij {
13208d318a50SLinus Walleij 	struct d40_chan *d40c =
13218d318a50SLinus Walleij 		container_of(chan, struct d40_chan, chan);
13228d318a50SLinus Walleij 	int res;
13238d318a50SLinus Walleij 	unsigned long flags;
13248d318a50SLinus Walleij 
13258d318a50SLinus Walleij 	spin_lock_irqsave(&d40c->lock, flags);
13268d318a50SLinus Walleij 
13278d318a50SLinus Walleij 	res = d40_channel_execute_command(d40c, D40_DMA_SUSPEND_REQ);
13288d318a50SLinus Walleij 	if (res == 0) {
13298d318a50SLinus Walleij 		if (d40c->log_num != D40_PHY_CHAN) {
13308d318a50SLinus Walleij 			d40_config_set_event(d40c, false);
13318d318a50SLinus Walleij 			/* Resume the other logical channels if any */
13328d318a50SLinus Walleij 			if (d40_chan_has_events(d40c))
13338d318a50SLinus Walleij 				res = d40_channel_execute_command(d40c,
13348d318a50SLinus Walleij 								  D40_DMA_RUN);
13358d318a50SLinus Walleij 		}
13368d318a50SLinus Walleij 	}
13378d318a50SLinus Walleij 
13388d318a50SLinus Walleij 	spin_unlock_irqrestore(&d40c->lock, flags);
13398d318a50SLinus Walleij 	return res;
13408d318a50SLinus Walleij }
13418d318a50SLinus Walleij 
/*
 * Check whether a channel is currently paused by reading the channel
 * state back from hardware.
 *
 * For a physical channel the 2-bit state field in the ACTIVE/ACTIVO
 * register is decoded; SUSPENDED or STOP counts as paused. For a
 * logical channel the event-line state is inspected instead: anything
 * other than RUN counts as paused.
 */
static bool d40_is_paused(struct d40_chan *d40c)
{
	bool is_paused = false;
	unsigned long flags;
	void __iomem *active_reg;
	u32 status;
	u32 event;

	spin_lock_irqsave(&d40c->lock, flags);

	if (d40c->log_num == D40_PHY_CHAN) {
		/* Even channels live in ACTIVE, odd ones in ACTIVO. */
		if (d40c->phy_chan->num % 2 == 0)
			active_reg = d40c->base->virtbase + D40_DREG_ACTIVE;
		else
			active_reg = d40c->base->virtbase + D40_DREG_ACTIVO;

		/* Extract this channel's 2-bit state field. */
		status = (readl(active_reg) &
			  D40_CHAN_POS_MASK(d40c->phy_chan->num)) >>
			D40_CHAN_POS(d40c->phy_chan->num);
		if (status == D40_DMA_SUSPENDED || status == D40_DMA_STOP)
			is_paused = true;

		goto _exit;
	}

	/* Logical channel: find the event line for its direction. */
	if (d40c->dma_cfg.dir == STEDMA40_MEM_TO_PERIPH ||
	    d40c->dma_cfg.dir == STEDMA40_MEM_TO_MEM)
		event = D40_TYPE_TO_EVENT(d40c->dma_cfg.dst_dev_type);
	else if (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_MEM)
		event = D40_TYPE_TO_EVENT(d40c->dma_cfg.src_dev_type);
	else {
		dev_err(&d40c->chan.dev->device,
			"[%s] Unknown direction\n", __func__);
		goto _exit;
	}
	/*
	 * NOTE(review): d40_chan_has_events() is used here as the raw
	 * event-line register value and then masked per event line —
	 * confirm its return value is indeed the register contents.
	 */
	status = d40_chan_has_events(d40c);
	status = (status & D40_EVENTLINE_MASK(event)) >>
		D40_EVENTLINE_POS(event);

	if (status != D40_DMA_RUN)
		is_paused = true;
_exit:
	spin_unlock_irqrestore(&d40c->lock, flags);
	return is_paused;

}
1388a5ebca47SJonas Aaberg 
1389a5ebca47SJonas Aaberg 
13908d318a50SLinus Walleij static bool d40_tx_is_linked(struct d40_chan *d40c)
13918d318a50SLinus Walleij {
13928d318a50SLinus Walleij 	bool is_link;
13938d318a50SLinus Walleij 
13948d318a50SLinus Walleij 	if (d40c->log_num != D40_PHY_CHAN)
13958d318a50SLinus Walleij 		is_link = readl(&d40c->lcpa->lcsp3) &  D40_MEM_LCSP3_DLOS_MASK;
13968d318a50SLinus Walleij 	else
13978d318a50SLinus Walleij 		is_link = readl(d40c->base->virtbase + D40_DREG_PCBASE +
13988d318a50SLinus Walleij 				d40c->phy_chan->num * D40_DREG_PCDELTA +
13998d318a50SLinus Walleij 				D40_CHAN_REG_SDLNK) &
14008d318a50SLinus Walleij 			D40_SREG_LNK_PHYS_LNK_MASK;
14018d318a50SLinus Walleij 	return is_link;
14028d318a50SLinus Walleij }
14038d318a50SLinus Walleij 
14048d318a50SLinus Walleij static u32 d40_residue(struct d40_chan *d40c)
14058d318a50SLinus Walleij {
14068d318a50SLinus Walleij 	u32 num_elt;
14078d318a50SLinus Walleij 
14088d318a50SLinus Walleij 	if (d40c->log_num != D40_PHY_CHAN)
14098d318a50SLinus Walleij 		num_elt = (readl(&d40c->lcpa->lcsp2) & D40_MEM_LCSP2_ECNT_MASK)
14108d318a50SLinus Walleij 			>> D40_MEM_LCSP2_ECNT_POS;
14118d318a50SLinus Walleij 	else
14128d318a50SLinus Walleij 		num_elt = (readl(d40c->base->virtbase + D40_DREG_PCBASE +
14138d318a50SLinus Walleij 				 d40c->phy_chan->num * D40_DREG_PCDELTA +
14148d318a50SLinus Walleij 				 D40_CHAN_REG_SDELT) &
1415508849adSLinus Walleij 			   D40_SREG_ELEM_PHY_ECNT_MASK) >>
1416508849adSLinus Walleij 			D40_SREG_ELEM_PHY_ECNT_POS;
14178d318a50SLinus Walleij 	return num_elt * (1 << d40c->dma_cfg.dst_info.data_width);
14188d318a50SLinus Walleij }
14198d318a50SLinus Walleij 
14208d318a50SLinus Walleij static int d40_resume(struct dma_chan *chan)
14218d318a50SLinus Walleij {
14228d318a50SLinus Walleij 	struct d40_chan *d40c =
14238d318a50SLinus Walleij 		container_of(chan, struct d40_chan, chan);
14248d318a50SLinus Walleij 	int res = 0;
14258d318a50SLinus Walleij 	unsigned long flags;
14268d318a50SLinus Walleij 
14278d318a50SLinus Walleij 	spin_lock_irqsave(&d40c->lock, flags);
14288d318a50SLinus Walleij 
14298d318a50SLinus Walleij 	/* If bytes left to transfer or linked tx resume job */
14308d318a50SLinus Walleij 	if (d40_residue(d40c) || d40_tx_is_linked(d40c)) {
14310c32269dSJonas Aaberg 		if (d40c->log_num != D40_PHY_CHAN)
14328d318a50SLinus Walleij 			d40_config_set_event(d40c, true);
14338d318a50SLinus Walleij 		res = d40_channel_execute_command(d40c, D40_DMA_RUN);
14348d318a50SLinus Walleij 	}
14358d318a50SLinus Walleij 
14368d318a50SLinus Walleij 	spin_unlock_irqrestore(&d40c->lock, flags);
14378d318a50SLinus Walleij 	return res;
14388d318a50SLinus Walleij }
14398d318a50SLinus Walleij 
14408d318a50SLinus Walleij static u32 stedma40_residue(struct dma_chan *chan)
14418d318a50SLinus Walleij {
14428d318a50SLinus Walleij 	struct d40_chan *d40c =
14438d318a50SLinus Walleij 		container_of(chan, struct d40_chan, chan);
14448d318a50SLinus Walleij 	u32 bytes_left;
14458d318a50SLinus Walleij 	unsigned long flags;
14468d318a50SLinus Walleij 
14478d318a50SLinus Walleij 	spin_lock_irqsave(&d40c->lock, flags);
14488d318a50SLinus Walleij 	bytes_left = d40_residue(d40c);
14498d318a50SLinus Walleij 	spin_unlock_irqrestore(&d40c->lock, flags);
14508d318a50SLinus Walleij 
14518d318a50SLinus Walleij 	return bytes_left;
14528d318a50SLinus Walleij }
14538d318a50SLinus Walleij 
14548d318a50SLinus Walleij /* Public DMA functions in addition to the DMA engine framework */
14558d318a50SLinus Walleij 
/**
 * stedma40_set_psize() - Set the src/dst burst (packet) sizes
 * @chan: the DMA channel
 * @src_psize: STEDMA40_PSIZE_* value for the source side
 * @dst_psize: STEDMA40_PSIZE_* value for the destination side
 *
 * Updates the channel's cached default configuration (applied to
 * hardware when jobs are set up). For physical channels a psize of
 * PHY_1 disables the packet-enable bit entirely. Always returns 0.
 */
int stedma40_set_psize(struct dma_chan *chan,
		       int src_psize,
		       int dst_psize)
{
	struct d40_chan *d40c =
		container_of(chan, struct d40_chan, chan);
	unsigned long flags;

	spin_lock_irqsave(&d40c->lock, flags);

	if (d40c->log_num != D40_PHY_CHAN) {
		/*
		 * NOTE(review): the LCSP1 PSIZE mask/pos are reused for
		 * lcsp3 — presumably the dst field has the same bit layout;
		 * confirm against ste_dma40_ll.h.
		 */
		d40c->log_def.lcsp1 &= ~D40_MEM_LCSP1_SCFG_PSIZE_MASK;
		d40c->log_def.lcsp3 &= ~D40_MEM_LCSP1_SCFG_PSIZE_MASK;
		d40c->log_def.lcsp1 |= src_psize <<
			D40_MEM_LCSP1_SCFG_PSIZE_POS;
		d40c->log_def.lcsp3 |= dst_psize <<
			D40_MEM_LCSP1_SCFG_PSIZE_POS;
		goto out;
	}

	/* Physical channel: PSIZE_PHY_1 means packet mode off. */
	if (src_psize == STEDMA40_PSIZE_PHY_1)
		d40c->src_def_cfg &= ~(1 << D40_SREG_CFG_PHY_PEN_POS);
	else {
		d40c->src_def_cfg |= 1 << D40_SREG_CFG_PHY_PEN_POS;
		d40c->src_def_cfg &= ~(STEDMA40_PSIZE_PHY_16 <<
				       D40_SREG_CFG_PSIZE_POS);
		d40c->src_def_cfg |= src_psize << D40_SREG_CFG_PSIZE_POS;
	}

	if (dst_psize == STEDMA40_PSIZE_PHY_1)
		d40c->dst_def_cfg &= ~(1 << D40_SREG_CFG_PHY_PEN_POS);
	else {
		d40c->dst_def_cfg |= 1 << D40_SREG_CFG_PHY_PEN_POS;
		d40c->dst_def_cfg &= ~(STEDMA40_PSIZE_PHY_16 <<
				       D40_SREG_CFG_PSIZE_POS);
		d40c->dst_def_cfg |= dst_psize << D40_SREG_CFG_PSIZE_POS;
	}
out:
	spin_unlock_irqrestore(&d40c->lock, flags);
	return 0;
}
EXPORT_SYMBOL(stedma40_set_psize);
14988d318a50SLinus Walleij 
/**
 * stedma40_memcpy_sg() - Prepare a scatter-gather memcpy descriptor
 * @chan: the DMA channel
 * @sgl_dst: destination scatterlist
 * @sgl_src: source scatterlist (same length as @sgl_dst)
 * @sgl_len: number of entries in each scatterlist
 * @dma_flags: DMA_PREP_* flags, stored in the descriptor
 *
 * Builds either logical (LCPA/LCLA) or physical LLI chains from the
 * two scatterlists and returns a descriptor ready for submission.
 * Returns the descriptor, ERR_PTR(-EINVAL) for an unallocated
 * channel, or NULL on allocation failure.
 */
struct dma_async_tx_descriptor *stedma40_memcpy_sg(struct dma_chan *chan,
						   struct scatterlist *sgl_dst,
						   struct scatterlist *sgl_src,
						   unsigned int sgl_len,
						   unsigned long dma_flags)
{
	int res;
	struct d40_desc *d40d;
	struct d40_chan *d40c = container_of(chan, struct d40_chan,
					     chan);
	unsigned long flags;

	if (d40c->phy_chan == NULL) {
		dev_err(&d40c->chan.dev->device,
			"[%s] Unallocated channel.\n", __func__);
		return ERR_PTR(-EINVAL);
	}

	spin_lock_irqsave(&d40c->lock, flags);
	d40d = d40_desc_get(d40c);

	/*
	 * NOTE(review): on the error paths below a descriptor obtained
	 * here is not returned to the free list — looks like a leak;
	 * confirm against d40_desc_get()/d40_desc_free() semantics.
	 */
	if (d40d == NULL)
		goto err;

	d40d->lli_len = sgl_len;
	d40d->lli_tx_len = d40d->lli_len;
	d40d->txd.flags = dma_flags;

	if (d40c->log_num != D40_PHY_CHAN) {
		/* Cap the per-transfer LLI count for logical channels. */
		if (d40d->lli_len > d40c->base->plat_data->llis_per_log)
			d40d->lli_tx_len = d40c->base->plat_data->llis_per_log;

		if (sgl_len > 1)
			/*
			 * Check if there is space available in lcla. If not,
			 * split list into 1-length and run only in lcpa
			 * space.
			 */
			if (d40_lcla_id_get(d40c) != 0)
				d40d->lli_tx_len = 1;

		if (d40_pool_lli_alloc(d40d, sgl_len, true) < 0) {
			dev_err(&d40c->chan.dev->device,
				"[%s] Out of memory\n", __func__);
			goto err;
		}

		/* Build the logical LLI chains for src and dst. */
		(void) d40_log_sg_to_lli(d40c->lcla.src_id,
					 sgl_src,
					 sgl_len,
					 d40d->lli_log.src,
					 d40c->log_def.lcsp1,
					 d40c->dma_cfg.src_info.data_width,
					 dma_flags & DMA_PREP_INTERRUPT,
					 d40d->lli_tx_len,
					 d40c->base->plat_data->llis_per_log);

		(void) d40_log_sg_to_lli(d40c->lcla.dst_id,
					 sgl_dst,
					 sgl_len,
					 d40d->lli_log.dst,
					 d40c->log_def.lcsp3,
					 d40c->dma_cfg.dst_info.data_width,
					 dma_flags & DMA_PREP_INTERRUPT,
					 d40d->lli_tx_len,
					 d40c->base->plat_data->llis_per_log);


	} else {
		if (d40_pool_lli_alloc(d40d, sgl_len, false) < 0) {
			dev_err(&d40c->chan.dev->device,
				"[%s] Out of memory\n", __func__);
			goto err;
		}

		/* Build the physical LLI chains for src and dst. */
		res = d40_phy_sg_to_lli(sgl_src,
					sgl_len,
					0,
					d40d->lli_phy.src,
					d40d->lli_phy.src_addr,
					d40c->src_def_cfg,
					d40c->dma_cfg.src_info.data_width,
					d40c->dma_cfg.src_info.psize,
					true);

		if (res < 0)
			goto err;

		res = d40_phy_sg_to_lli(sgl_dst,
					sgl_len,
					0,
					d40d->lli_phy.dst,
					d40d->lli_phy.dst_addr,
					d40c->dst_def_cfg,
					d40c->dma_cfg.dst_info.data_width,
					d40c->dma_cfg.dst_info.psize,
					true);

		if (res < 0)
			goto err;

		/* Flush the LLI table to memory so the DMA engine sees it. */
		(void) dma_map_single(d40c->base->dev, d40d->lli_phy.src,
				      d40d->lli_pool.size, DMA_TO_DEVICE);
	}

	dma_async_tx_descriptor_init(&d40d->txd, chan);

	d40d->txd.tx_submit = d40_tx_submit;

	spin_unlock_irqrestore(&d40c->lock, flags);

	return &d40d->txd;
err:
	spin_unlock_irqrestore(&d40c->lock, flags);
	return NULL;
}
EXPORT_SYMBOL(stedma40_memcpy_sg);
16168d318a50SLinus Walleij 
16178d318a50SLinus Walleij bool stedma40_filter(struct dma_chan *chan, void *data)
16188d318a50SLinus Walleij {
16198d318a50SLinus Walleij 	struct stedma40_chan_cfg *info = data;
16208d318a50SLinus Walleij 	struct d40_chan *d40c =
16218d318a50SLinus Walleij 		container_of(chan, struct d40_chan, chan);
16228d318a50SLinus Walleij 	int err;
16238d318a50SLinus Walleij 
16248d318a50SLinus Walleij 	if (data) {
16258d318a50SLinus Walleij 		err = d40_validate_conf(d40c, info);
16268d318a50SLinus Walleij 		if (!err)
16278d318a50SLinus Walleij 			d40c->dma_cfg = *info;
16288d318a50SLinus Walleij 	} else
16298d318a50SLinus Walleij 		err = d40_config_memcpy(d40c);
16308d318a50SLinus Walleij 
16318d318a50SLinus Walleij 	return err == 0;
16328d318a50SLinus Walleij }
16338d318a50SLinus Walleij EXPORT_SYMBOL(stedma40_filter);
16348d318a50SLinus Walleij 
16358d318a50SLinus Walleij /* DMA ENGINE functions */
/* DMA ENGINE functions */

/*
 * dma_device.device_alloc_chan_resources callback.
 *
 * Applies a default memcpy configuration if none was set via the
 * filter, allocates a physical/logical channel, derives the default
 * CFG/LCSP register values and the LCPA pointer, and writes the
 * configuration to hardware — but only for the first user of the
 * physical resource. Returns 0 or a negative errno.
 */
static int d40_alloc_chan_resources(struct dma_chan *chan)
{
	int err;
	unsigned long flags;
	struct d40_chan *d40c =
		container_of(chan, struct d40_chan, chan);
	bool is_free_phy;
	spin_lock_irqsave(&d40c->lock, flags);

	/* Initialize the dmaengine cookie bookkeeping. */
	d40c->completed = chan->cookie = 1;

	/*
	 * If no dma configuration is set (channel_type == 0)
	 * use default configuration (memcpy)
	 */
	if (d40c->dma_cfg.channel_type == 0) {
		err = d40_config_memcpy(d40c);
		if (err) {
			dev_err(&d40c->chan.dev->device,
				"[%s] Failed to configure memcpy channel\n",
				__func__);
			goto fail;
		}
	}
	/* Remember whether the phy resource was free before allocation. */
	is_free_phy = (d40c->phy_chan == NULL);

	err = d40_allocate_channel(d40c);
	if (err) {
		dev_err(&d40c->chan.dev->device,
			"[%s] Failed to allocate channel\n", __func__);
		goto fail;
	}

	/* Fill in basic CFG register values */
	d40_phy_cfg(&d40c->dma_cfg, &d40c->src_def_cfg,
		    &d40c->dst_def_cfg, d40c->log_num != D40_PHY_CHAN);

	if (d40c->log_num != D40_PHY_CHAN) {
		d40_log_cfg(&d40c->dma_cfg,
			    &d40c->log_def.lcsp1, &d40c->log_def.lcsp3);

		/* Point lcpa at this channel's slot in the LCPA area. */
		if (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_MEM)
			d40c->lcpa = d40c->base->lcpa_base +
			  d40c->dma_cfg.src_dev_type * D40_LCPA_CHAN_SIZE;
		else
			d40c->lcpa = d40c->base->lcpa_base +
			  d40c->dma_cfg.dst_dev_type *
			  D40_LCPA_CHAN_SIZE + D40_LCPA_CHAN_DST_DELTA;
	}

	/*
	 * Only write channel configuration to the DMA if the physical
	 * resource is free. In case of multiple logical channels
	 * on the same physical resource, only the first write is necessary.
	 */
	if (is_free_phy) {
		err = d40_config_write(d40c);
		if (err) {
			dev_err(&d40c->chan.dev->device,
				"[%s] Failed to configure channel\n",
				__func__);
		}
	}
fail:
	spin_unlock_irqrestore(&d40c->lock, flags);
	return err;
}
17038d318a50SLinus Walleij 
17048d318a50SLinus Walleij static void d40_free_chan_resources(struct dma_chan *chan)
17058d318a50SLinus Walleij {
17068d318a50SLinus Walleij 	struct d40_chan *d40c =
17078d318a50SLinus Walleij 		container_of(chan, struct d40_chan, chan);
17088d318a50SLinus Walleij 	int err;
17098d318a50SLinus Walleij 	unsigned long flags;
17108d318a50SLinus Walleij 
17110d0f6b8bSJonas Aaberg 	if (d40c->phy_chan == NULL) {
17120d0f6b8bSJonas Aaberg 		dev_err(&d40c->chan.dev->device,
17130d0f6b8bSJonas Aaberg 			"[%s] Cannot free unallocated channel\n", __func__);
17140d0f6b8bSJonas Aaberg 		return;
17150d0f6b8bSJonas Aaberg 	}
17160d0f6b8bSJonas Aaberg 
17170d0f6b8bSJonas Aaberg 
17188d318a50SLinus Walleij 	spin_lock_irqsave(&d40c->lock, flags);
17198d318a50SLinus Walleij 
17208d318a50SLinus Walleij 	err = d40_free_dma(d40c);
17218d318a50SLinus Walleij 
17228d318a50SLinus Walleij 	if (err)
17238d318a50SLinus Walleij 		dev_err(&d40c->chan.dev->device,
17248d318a50SLinus Walleij 			"[%s] Failed to free channel\n", __func__);
17258d318a50SLinus Walleij 	spin_unlock_irqrestore(&d40c->lock, flags);
17268d318a50SLinus Walleij }
17278d318a50SLinus Walleij 
/*
 * dma_device.device_prep_dma_memcpy callback.
 *
 * Builds a single-entry LLI pair (logical or physical, depending on
 * the channel) describing a @size byte copy from @src to @dst and
 * returns the initialized descriptor. Returns ERR_PTR(-EINVAL) for
 * an unallocated channel, NULL on allocation/fill failure.
 */
static struct dma_async_tx_descriptor *d40_prep_memcpy(struct dma_chan *chan,
						       dma_addr_t dst,
						       dma_addr_t src,
						       size_t size,
						       unsigned long dma_flags)
{
	struct d40_desc *d40d;
	struct d40_chan *d40c = container_of(chan, struct d40_chan,
					     chan);
	unsigned long flags;
	int err = 0;

	if (d40c->phy_chan == NULL) {
		dev_err(&d40c->chan.dev->device,
			"[%s] Channel is not allocated.\n", __func__);
		return ERR_PTR(-EINVAL);
	}

	spin_lock_irqsave(&d40c->lock, flags);
	d40d = d40_desc_get(d40c);

	if (d40d == NULL) {
		dev_err(&d40c->chan.dev->device,
			"[%s] Descriptor is NULL\n", __func__);
		goto err;
	}

	d40d->txd.flags = dma_flags;

	dma_async_tx_descriptor_init(&d40d->txd, chan);

	d40d->txd.tx_submit = d40_tx_submit;

	if (d40c->log_num != D40_PHY_CHAN) {
		/* Logical channel: one src + one dst logical LLI. */

		if (d40_pool_lli_alloc(d40d, 1, true) < 0) {
			dev_err(&d40c->chan.dev->device,
				"[%s] Out of memory\n", __func__);
			goto err;
		}
		d40d->lli_len = 1;
		d40d->lli_tx_len = 1;

		d40_log_fill_lli(d40d->lli_log.src,
				 src,
				 size,
				 0,
				 d40c->log_def.lcsp1,
				 d40c->dma_cfg.src_info.data_width,
				 false, true);

		d40_log_fill_lli(d40d->lli_log.dst,
				 dst,
				 size,
				 0,
				 d40c->log_def.lcsp3,
				 d40c->dma_cfg.dst_info.data_width,
				 true, true);

	} else {
		/* Physical channel: fill src/dst phy LLIs and flush them. */

		if (d40_pool_lli_alloc(d40d, 1, false) < 0) {
			dev_err(&d40c->chan.dev->device,
				"[%s] Out of memory\n", __func__);
			goto err;
		}

		err = d40_phy_fill_lli(d40d->lli_phy.src,
				       src,
				       size,
				       d40c->dma_cfg.src_info.psize,
				       0,
				       d40c->src_def_cfg,
				       true,
				       d40c->dma_cfg.src_info.data_width,
				       false);
		if (err)
			goto err_fill_lli;

		err = d40_phy_fill_lli(d40d->lli_phy.dst,
				       dst,
				       size,
				       d40c->dma_cfg.dst_info.psize,
				       0,
				       d40c->dst_def_cfg,
				       true,
				       d40c->dma_cfg.dst_info.data_width,
				       false);

		if (err)
			goto err_fill_lli;

		/* Make the LLI table visible to the DMA engine. */
		(void) dma_map_single(d40c->base->dev, d40d->lli_phy.src,
				      d40d->lli_pool.size, DMA_TO_DEVICE);
	}

	spin_unlock_irqrestore(&d40c->lock, flags);
	return &d40d->txd;

err_fill_lli:
	dev_err(&d40c->chan.dev->device,
		"[%s] Failed filling in PHY LLI\n", __func__);
	/*
	 * NOTE(review): only the phy fill-failure path frees the LLI
	 * pool; the plain err path after a successful pool alloc does
	 * not, and the descriptor itself is not returned to the free
	 * list on either path — confirm whether this leaks.
	 */
	d40_pool_lli_free(d40d);
err:
	spin_unlock_irqrestore(&d40c->lock, flags);
	return NULL;
}
18358d318a50SLinus Walleij 
18368d318a50SLinus Walleij static int d40_prep_slave_sg_log(struct d40_desc *d40d,
18378d318a50SLinus Walleij 				 struct d40_chan *d40c,
18388d318a50SLinus Walleij 				 struct scatterlist *sgl,
18398d318a50SLinus Walleij 				 unsigned int sg_len,
18408d318a50SLinus Walleij 				 enum dma_data_direction direction,
18412a614340SJonas Aaberg 				 unsigned long dma_flags)
18428d318a50SLinus Walleij {
18438d318a50SLinus Walleij 	dma_addr_t dev_addr = 0;
18448d318a50SLinus Walleij 	int total_size;
18458d318a50SLinus Walleij 
18468d318a50SLinus Walleij 	if (d40_pool_lli_alloc(d40d, sg_len, true) < 0) {
18478d318a50SLinus Walleij 		dev_err(&d40c->chan.dev->device,
18488d318a50SLinus Walleij 			"[%s] Out of memory\n", __func__);
18498d318a50SLinus Walleij 		return -ENOMEM;
18508d318a50SLinus Walleij 	}
18518d318a50SLinus Walleij 
18528d318a50SLinus Walleij 	d40d->lli_len = sg_len;
1853941b77a3SPer Friden 	if (d40d->lli_len <= d40c->base->plat_data->llis_per_log)
1854941b77a3SPer Friden 		d40d->lli_tx_len = d40d->lli_len;
1855941b77a3SPer Friden 	else
1856941b77a3SPer Friden 		d40d->lli_tx_len = d40c->base->plat_data->llis_per_log;
18578d318a50SLinus Walleij 
18588d318a50SLinus Walleij 	if (sg_len > 1)
18598d318a50SLinus Walleij 		/*
18608d318a50SLinus Walleij 		 * Check if there is space available in lcla.
18618d318a50SLinus Walleij 		 * If not, split list into 1-length and run only
18628d318a50SLinus Walleij 		 * in lcpa space.
18638d318a50SLinus Walleij 		 */
1864508849adSLinus Walleij 		if (d40_lcla_id_get(d40c) != 0)
1865941b77a3SPer Friden 			d40d->lli_tx_len = 1;
18668d318a50SLinus Walleij 
18672a614340SJonas Aaberg 	if (direction == DMA_FROM_DEVICE)
18688d318a50SLinus Walleij 		dev_addr = d40c->base->plat_data->dev_rx[d40c->dma_cfg.src_dev_type];
18692a614340SJonas Aaberg 	else if (direction == DMA_TO_DEVICE)
18708d318a50SLinus Walleij 		dev_addr = d40c->base->plat_data->dev_tx[d40c->dma_cfg.dst_dev_type];
18712a614340SJonas Aaberg 	else
18722a614340SJonas Aaberg 		return -EINVAL;
18732a614340SJonas Aaberg 
18748d318a50SLinus Walleij 	total_size = d40_log_sg_to_dev(&d40c->lcla,
18758d318a50SLinus Walleij 				       sgl, sg_len,
18768d318a50SLinus Walleij 				       &d40d->lli_log,
18778d318a50SLinus Walleij 				       &d40c->log_def,
18788d318a50SLinus Walleij 				       d40c->dma_cfg.src_info.data_width,
18798d318a50SLinus Walleij 				       d40c->dma_cfg.dst_info.data_width,
18808d318a50SLinus Walleij 				       direction,
18812a614340SJonas Aaberg 				       dma_flags & DMA_PREP_INTERRUPT,
1882941b77a3SPer Friden 				       dev_addr, d40d->lli_tx_len,
18838d318a50SLinus Walleij 				       d40c->base->plat_data->llis_per_log);
18842a614340SJonas Aaberg 
18858d318a50SLinus Walleij 	if (total_size < 0)
18868d318a50SLinus Walleij 		return -EINVAL;
18878d318a50SLinus Walleij 
18888d318a50SLinus Walleij 	return 0;
18898d318a50SLinus Walleij }
18908d318a50SLinus Walleij 
18918d318a50SLinus Walleij static int d40_prep_slave_sg_phy(struct d40_desc *d40d,
18928d318a50SLinus Walleij 				 struct d40_chan *d40c,
18938d318a50SLinus Walleij 				 struct scatterlist *sgl,
18948d318a50SLinus Walleij 				 unsigned int sgl_len,
18958d318a50SLinus Walleij 				 enum dma_data_direction direction,
18962a614340SJonas Aaberg 				 unsigned long dma_flags)
18978d318a50SLinus Walleij {
18988d318a50SLinus Walleij 	dma_addr_t src_dev_addr;
18998d318a50SLinus Walleij 	dma_addr_t dst_dev_addr;
19008d318a50SLinus Walleij 	int res;
19018d318a50SLinus Walleij 
19028d318a50SLinus Walleij 	if (d40_pool_lli_alloc(d40d, sgl_len, false) < 0) {
19038d318a50SLinus Walleij 		dev_err(&d40c->chan.dev->device,
19048d318a50SLinus Walleij 			"[%s] Out of memory\n", __func__);
19058d318a50SLinus Walleij 		return -ENOMEM;
19068d318a50SLinus Walleij 	}
19078d318a50SLinus Walleij 
19088d318a50SLinus Walleij 	d40d->lli_len = sgl_len;
1909941b77a3SPer Friden 	d40d->lli_tx_len = sgl_len;
19108d318a50SLinus Walleij 
19118d318a50SLinus Walleij 	if (direction == DMA_FROM_DEVICE) {
19128d318a50SLinus Walleij 		dst_dev_addr = 0;
19138d318a50SLinus Walleij 		src_dev_addr = d40c->base->plat_data->dev_rx[d40c->dma_cfg.src_dev_type];
19148d318a50SLinus Walleij 	} else if (direction == DMA_TO_DEVICE) {
19158d318a50SLinus Walleij 		dst_dev_addr = d40c->base->plat_data->dev_tx[d40c->dma_cfg.dst_dev_type];
19168d318a50SLinus Walleij 		src_dev_addr = 0;
19178d318a50SLinus Walleij 	} else
19188d318a50SLinus Walleij 		return -EINVAL;
19198d318a50SLinus Walleij 
19208d318a50SLinus Walleij 	res = d40_phy_sg_to_lli(sgl,
19218d318a50SLinus Walleij 				sgl_len,
19228d318a50SLinus Walleij 				src_dev_addr,
19238d318a50SLinus Walleij 				d40d->lli_phy.src,
19248d318a50SLinus Walleij 				d40d->lli_phy.src_addr,
19258d318a50SLinus Walleij 				d40c->src_def_cfg,
19268d318a50SLinus Walleij 				d40c->dma_cfg.src_info.data_width,
19278d318a50SLinus Walleij 				d40c->dma_cfg.src_info.psize,
19288d318a50SLinus Walleij 				true);
19298d318a50SLinus Walleij 	if (res < 0)
19308d318a50SLinus Walleij 		return res;
19318d318a50SLinus Walleij 
19328d318a50SLinus Walleij 	res = d40_phy_sg_to_lli(sgl,
19338d318a50SLinus Walleij 				sgl_len,
19348d318a50SLinus Walleij 				dst_dev_addr,
19358d318a50SLinus Walleij 				d40d->lli_phy.dst,
19368d318a50SLinus Walleij 				d40d->lli_phy.dst_addr,
19378d318a50SLinus Walleij 				d40c->dst_def_cfg,
19388d318a50SLinus Walleij 				d40c->dma_cfg.dst_info.data_width,
19398d318a50SLinus Walleij 				d40c->dma_cfg.dst_info.psize,
19408d318a50SLinus Walleij 				 true);
19418d318a50SLinus Walleij 	if (res < 0)
19428d318a50SLinus Walleij 		return res;
19438d318a50SLinus Walleij 
19448d318a50SLinus Walleij 	(void) dma_map_single(d40c->base->dev, d40d->lli_phy.src,
19458d318a50SLinus Walleij 			      d40d->lli_pool.size, DMA_TO_DEVICE);
19468d318a50SLinus Walleij 	return 0;
19478d318a50SLinus Walleij }
19488d318a50SLinus Walleij 
19498d318a50SLinus Walleij static struct dma_async_tx_descriptor *d40_prep_slave_sg(struct dma_chan *chan,
19508d318a50SLinus Walleij 							 struct scatterlist *sgl,
19518d318a50SLinus Walleij 							 unsigned int sg_len,
19528d318a50SLinus Walleij 							 enum dma_data_direction direction,
19532a614340SJonas Aaberg 							 unsigned long dma_flags)
19548d318a50SLinus Walleij {
19558d318a50SLinus Walleij 	struct d40_desc *d40d;
19568d318a50SLinus Walleij 	struct d40_chan *d40c = container_of(chan, struct d40_chan,
19578d318a50SLinus Walleij 					     chan);
19582a614340SJonas Aaberg 	unsigned long flags;
19598d318a50SLinus Walleij 	int err;
19608d318a50SLinus Walleij 
19610d0f6b8bSJonas Aaberg 	if (d40c->phy_chan == NULL) {
19620d0f6b8bSJonas Aaberg 		dev_err(&d40c->chan.dev->device,
19630d0f6b8bSJonas Aaberg 			"[%s] Cannot prepare unallocated channel\n", __func__);
19640d0f6b8bSJonas Aaberg 		return ERR_PTR(-EINVAL);
19650d0f6b8bSJonas Aaberg 	}
19660d0f6b8bSJonas Aaberg 
19678d318a50SLinus Walleij 	if (d40c->dma_cfg.pre_transfer)
19688d318a50SLinus Walleij 		d40c->dma_cfg.pre_transfer(chan,
19698d318a50SLinus Walleij 					   d40c->dma_cfg.pre_transfer_data,
19708d318a50SLinus Walleij 					   sg_dma_len(sgl));
19718d318a50SLinus Walleij 
19722a614340SJonas Aaberg 	spin_lock_irqsave(&d40c->lock, flags);
19738d318a50SLinus Walleij 	d40d = d40_desc_get(d40c);
19742a614340SJonas Aaberg 	spin_unlock_irqrestore(&d40c->lock, flags);
19758d318a50SLinus Walleij 
19768d318a50SLinus Walleij 	if (d40d == NULL)
19778d318a50SLinus Walleij 		return NULL;
19788d318a50SLinus Walleij 
19798d318a50SLinus Walleij 	if (d40c->log_num != D40_PHY_CHAN)
19808d318a50SLinus Walleij 		err = d40_prep_slave_sg_log(d40d, d40c, sgl, sg_len,
19812a614340SJonas Aaberg 					    direction, dma_flags);
19828d318a50SLinus Walleij 	else
19838d318a50SLinus Walleij 		err = d40_prep_slave_sg_phy(d40d, d40c, sgl, sg_len,
19842a614340SJonas Aaberg 					    direction, dma_flags);
19858d318a50SLinus Walleij 	if (err) {
19868d318a50SLinus Walleij 		dev_err(&d40c->chan.dev->device,
19878d318a50SLinus Walleij 			"[%s] Failed to prepare %s slave sg job: %d\n",
19888d318a50SLinus Walleij 			__func__,
19898d318a50SLinus Walleij 			d40c->log_num != D40_PHY_CHAN ? "log" : "phy", err);
19908d318a50SLinus Walleij 		return NULL;
19918d318a50SLinus Walleij 	}
19928d318a50SLinus Walleij 
19932a614340SJonas Aaberg 	d40d->txd.flags = dma_flags;
19948d318a50SLinus Walleij 
19958d318a50SLinus Walleij 	dma_async_tx_descriptor_init(&d40d->txd, chan);
19968d318a50SLinus Walleij 
19978d318a50SLinus Walleij 	d40d->txd.tx_submit = d40_tx_submit;
19988d318a50SLinus Walleij 
19998d318a50SLinus Walleij 	return &d40d->txd;
20008d318a50SLinus Walleij }
20018d318a50SLinus Walleij 
20028d318a50SLinus Walleij static enum dma_status d40_tx_status(struct dma_chan *chan,
20038d318a50SLinus Walleij 				     dma_cookie_t cookie,
20048d318a50SLinus Walleij 				     struct dma_tx_state *txstate)
20058d318a50SLinus Walleij {
20068d318a50SLinus Walleij 	struct d40_chan *d40c = container_of(chan, struct d40_chan, chan);
20078d318a50SLinus Walleij 	dma_cookie_t last_used;
20088d318a50SLinus Walleij 	dma_cookie_t last_complete;
20098d318a50SLinus Walleij 	int ret;
20108d318a50SLinus Walleij 
20110d0f6b8bSJonas Aaberg 	if (d40c->phy_chan == NULL) {
20120d0f6b8bSJonas Aaberg 		dev_err(&d40c->chan.dev->device,
20130d0f6b8bSJonas Aaberg 			"[%s] Cannot read status of unallocated channel\n",
20140d0f6b8bSJonas Aaberg 			__func__);
20150d0f6b8bSJonas Aaberg 		return -EINVAL;
20160d0f6b8bSJonas Aaberg 	}
20170d0f6b8bSJonas Aaberg 
20188d318a50SLinus Walleij 	last_complete = d40c->completed;
20198d318a50SLinus Walleij 	last_used = chan->cookie;
20208d318a50SLinus Walleij 
2021a5ebca47SJonas Aaberg 	if (d40_is_paused(d40c))
2022a5ebca47SJonas Aaberg 		ret = DMA_PAUSED;
2023a5ebca47SJonas Aaberg 	else
20248d318a50SLinus Walleij 		ret = dma_async_is_complete(cookie, last_complete, last_used);
20258d318a50SLinus Walleij 
2026a5ebca47SJonas Aaberg 	dma_set_tx_state(txstate, last_complete, last_used,
2027a5ebca47SJonas Aaberg 			 stedma40_residue(chan));
20288d318a50SLinus Walleij 
20298d318a50SLinus Walleij 	return ret;
20308d318a50SLinus Walleij }
20318d318a50SLinus Walleij 
20328d318a50SLinus Walleij static void d40_issue_pending(struct dma_chan *chan)
20338d318a50SLinus Walleij {
20348d318a50SLinus Walleij 	struct d40_chan *d40c = container_of(chan, struct d40_chan, chan);
20358d318a50SLinus Walleij 	unsigned long flags;
20368d318a50SLinus Walleij 
20370d0f6b8bSJonas Aaberg 	if (d40c->phy_chan == NULL) {
20380d0f6b8bSJonas Aaberg 		dev_err(&d40c->chan.dev->device,
20390d0f6b8bSJonas Aaberg 			"[%s] Channel is not allocated!\n", __func__);
20400d0f6b8bSJonas Aaberg 		return;
20410d0f6b8bSJonas Aaberg 	}
20420d0f6b8bSJonas Aaberg 
20438d318a50SLinus Walleij 	spin_lock_irqsave(&d40c->lock, flags);
20448d318a50SLinus Walleij 
20458d318a50SLinus Walleij 	/* Busy means that pending jobs are already being processed */
20468d318a50SLinus Walleij 	if (!d40c->busy)
20478d318a50SLinus Walleij 		(void) d40_queue_start(d40c);
20488d318a50SLinus Walleij 
20498d318a50SLinus Walleij 	spin_unlock_irqrestore(&d40c->lock, flags);
20508d318a50SLinus Walleij }
20518d318a50SLinus Walleij 
205205827630SLinus Walleij static int d40_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
205305827630SLinus Walleij 		       unsigned long arg)
20548d318a50SLinus Walleij {
20558d318a50SLinus Walleij 	unsigned long flags;
20568d318a50SLinus Walleij 	struct d40_chan *d40c = container_of(chan, struct d40_chan, chan);
20578d318a50SLinus Walleij 
20580d0f6b8bSJonas Aaberg 	if (d40c->phy_chan == NULL) {
20590d0f6b8bSJonas Aaberg 		dev_err(&d40c->chan.dev->device,
20600d0f6b8bSJonas Aaberg 			"[%s] Channel is not allocated!\n", __func__);
20610d0f6b8bSJonas Aaberg 		return -EINVAL;
20620d0f6b8bSJonas Aaberg 	}
20630d0f6b8bSJonas Aaberg 
20648d318a50SLinus Walleij 	switch (cmd) {
20658d318a50SLinus Walleij 	case DMA_TERMINATE_ALL:
20668d318a50SLinus Walleij 		spin_lock_irqsave(&d40c->lock, flags);
20678d318a50SLinus Walleij 		d40_term_all(d40c);
20688d318a50SLinus Walleij 		spin_unlock_irqrestore(&d40c->lock, flags);
20698d318a50SLinus Walleij 		return 0;
20708d318a50SLinus Walleij 	case DMA_PAUSE:
20718d318a50SLinus Walleij 		return d40_pause(chan);
20728d318a50SLinus Walleij 	case DMA_RESUME:
20738d318a50SLinus Walleij 		return d40_resume(chan);
20748d318a50SLinus Walleij 	}
20758d318a50SLinus Walleij 
20768d318a50SLinus Walleij 	/* Other commands are unimplemented */
20778d318a50SLinus Walleij 	return -ENXIO;
20788d318a50SLinus Walleij }
20798d318a50SLinus Walleij 
20808d318a50SLinus Walleij /* Initialization functions */
20818d318a50SLinus Walleij 
20828d318a50SLinus Walleij static void __init d40_chan_init(struct d40_base *base, struct dma_device *dma,
20838d318a50SLinus Walleij 				 struct d40_chan *chans, int offset,
20848d318a50SLinus Walleij 				 int num_chans)
20858d318a50SLinus Walleij {
20868d318a50SLinus Walleij 	int i = 0;
20878d318a50SLinus Walleij 	struct d40_chan *d40c;
20888d318a50SLinus Walleij 
20898d318a50SLinus Walleij 	INIT_LIST_HEAD(&dma->channels);
20908d318a50SLinus Walleij 
20918d318a50SLinus Walleij 	for (i = offset; i < offset + num_chans; i++) {
20928d318a50SLinus Walleij 		d40c = &chans[i];
20938d318a50SLinus Walleij 		d40c->base = base;
20948d318a50SLinus Walleij 		d40c->chan.device = dma;
20958d318a50SLinus Walleij 
20968d318a50SLinus Walleij 		/* Invalidate lcla element */
20978d318a50SLinus Walleij 		d40c->lcla.src_id = -1;
20988d318a50SLinus Walleij 		d40c->lcla.dst_id = -1;
20998d318a50SLinus Walleij 
21008d318a50SLinus Walleij 		spin_lock_init(&d40c->lock);
21018d318a50SLinus Walleij 
21028d318a50SLinus Walleij 		d40c->log_num = D40_PHY_CHAN;
21038d318a50SLinus Walleij 
21048d318a50SLinus Walleij 		INIT_LIST_HEAD(&d40c->active);
21058d318a50SLinus Walleij 		INIT_LIST_HEAD(&d40c->queue);
21068d318a50SLinus Walleij 		INIT_LIST_HEAD(&d40c->client);
21078d318a50SLinus Walleij 
21088d318a50SLinus Walleij 		tasklet_init(&d40c->tasklet, dma_tasklet,
21098d318a50SLinus Walleij 			     (unsigned long) d40c);
21108d318a50SLinus Walleij 
21118d318a50SLinus Walleij 		list_add_tail(&d40c->chan.device_node,
21128d318a50SLinus Walleij 			      &dma->channels);
21138d318a50SLinus Walleij 	}
21148d318a50SLinus Walleij }
21158d318a50SLinus Walleij 
21168d318a50SLinus Walleij static int __init d40_dmaengine_init(struct d40_base *base,
21178d318a50SLinus Walleij 				     int num_reserved_chans)
21188d318a50SLinus Walleij {
21198d318a50SLinus Walleij 	int err ;
21208d318a50SLinus Walleij 
21218d318a50SLinus Walleij 	d40_chan_init(base, &base->dma_slave, base->log_chans,
21228d318a50SLinus Walleij 		      0, base->num_log_chans);
21238d318a50SLinus Walleij 
21248d318a50SLinus Walleij 	dma_cap_zero(base->dma_slave.cap_mask);
21258d318a50SLinus Walleij 	dma_cap_set(DMA_SLAVE, base->dma_slave.cap_mask);
21268d318a50SLinus Walleij 
21278d318a50SLinus Walleij 	base->dma_slave.device_alloc_chan_resources = d40_alloc_chan_resources;
21288d318a50SLinus Walleij 	base->dma_slave.device_free_chan_resources = d40_free_chan_resources;
21298d318a50SLinus Walleij 	base->dma_slave.device_prep_dma_memcpy = d40_prep_memcpy;
21308d318a50SLinus Walleij 	base->dma_slave.device_prep_slave_sg = d40_prep_slave_sg;
21318d318a50SLinus Walleij 	base->dma_slave.device_tx_status = d40_tx_status;
21328d318a50SLinus Walleij 	base->dma_slave.device_issue_pending = d40_issue_pending;
21338d318a50SLinus Walleij 	base->dma_slave.device_control = d40_control;
21348d318a50SLinus Walleij 	base->dma_slave.dev = base->dev;
21358d318a50SLinus Walleij 
21368d318a50SLinus Walleij 	err = dma_async_device_register(&base->dma_slave);
21378d318a50SLinus Walleij 
21388d318a50SLinus Walleij 	if (err) {
21398d318a50SLinus Walleij 		dev_err(base->dev,
21408d318a50SLinus Walleij 			"[%s] Failed to register slave channels\n",
21418d318a50SLinus Walleij 			__func__);
21428d318a50SLinus Walleij 		goto failure1;
21438d318a50SLinus Walleij 	}
21448d318a50SLinus Walleij 
21458d318a50SLinus Walleij 	d40_chan_init(base, &base->dma_memcpy, base->log_chans,
21468d318a50SLinus Walleij 		      base->num_log_chans, base->plat_data->memcpy_len);
21478d318a50SLinus Walleij 
21488d318a50SLinus Walleij 	dma_cap_zero(base->dma_memcpy.cap_mask);
21498d318a50SLinus Walleij 	dma_cap_set(DMA_MEMCPY, base->dma_memcpy.cap_mask);
21508d318a50SLinus Walleij 
21518d318a50SLinus Walleij 	base->dma_memcpy.device_alloc_chan_resources = d40_alloc_chan_resources;
21528d318a50SLinus Walleij 	base->dma_memcpy.device_free_chan_resources = d40_free_chan_resources;
21538d318a50SLinus Walleij 	base->dma_memcpy.device_prep_dma_memcpy = d40_prep_memcpy;
21548d318a50SLinus Walleij 	base->dma_memcpy.device_prep_slave_sg = d40_prep_slave_sg;
21558d318a50SLinus Walleij 	base->dma_memcpy.device_tx_status = d40_tx_status;
21568d318a50SLinus Walleij 	base->dma_memcpy.device_issue_pending = d40_issue_pending;
21578d318a50SLinus Walleij 	base->dma_memcpy.device_control = d40_control;
21588d318a50SLinus Walleij 	base->dma_memcpy.dev = base->dev;
21598d318a50SLinus Walleij 	/*
21608d318a50SLinus Walleij 	 * This controller can only access address at even
21618d318a50SLinus Walleij 	 * 32bit boundaries, i.e. 2^2
21628d318a50SLinus Walleij 	 */
21638d318a50SLinus Walleij 	base->dma_memcpy.copy_align = 2;
21648d318a50SLinus Walleij 
21658d318a50SLinus Walleij 	err = dma_async_device_register(&base->dma_memcpy);
21668d318a50SLinus Walleij 
21678d318a50SLinus Walleij 	if (err) {
21688d318a50SLinus Walleij 		dev_err(base->dev,
21698d318a50SLinus Walleij 			"[%s] Failed to regsiter memcpy only channels\n",
21708d318a50SLinus Walleij 			__func__);
21718d318a50SLinus Walleij 		goto failure2;
21728d318a50SLinus Walleij 	}
21738d318a50SLinus Walleij 
21748d318a50SLinus Walleij 	d40_chan_init(base, &base->dma_both, base->phy_chans,
21758d318a50SLinus Walleij 		      0, num_reserved_chans);
21768d318a50SLinus Walleij 
21778d318a50SLinus Walleij 	dma_cap_zero(base->dma_both.cap_mask);
21788d318a50SLinus Walleij 	dma_cap_set(DMA_SLAVE, base->dma_both.cap_mask);
21798d318a50SLinus Walleij 	dma_cap_set(DMA_MEMCPY, base->dma_both.cap_mask);
21808d318a50SLinus Walleij 
21818d318a50SLinus Walleij 	base->dma_both.device_alloc_chan_resources = d40_alloc_chan_resources;
21828d318a50SLinus Walleij 	base->dma_both.device_free_chan_resources = d40_free_chan_resources;
21838d318a50SLinus Walleij 	base->dma_both.device_prep_dma_memcpy = d40_prep_memcpy;
21848d318a50SLinus Walleij 	base->dma_both.device_prep_slave_sg = d40_prep_slave_sg;
21858d318a50SLinus Walleij 	base->dma_both.device_tx_status = d40_tx_status;
21868d318a50SLinus Walleij 	base->dma_both.device_issue_pending = d40_issue_pending;
21878d318a50SLinus Walleij 	base->dma_both.device_control = d40_control;
21888d318a50SLinus Walleij 	base->dma_both.dev = base->dev;
21898d318a50SLinus Walleij 	base->dma_both.copy_align = 2;
21908d318a50SLinus Walleij 	err = dma_async_device_register(&base->dma_both);
21918d318a50SLinus Walleij 
21928d318a50SLinus Walleij 	if (err) {
21938d318a50SLinus Walleij 		dev_err(base->dev,
21948d318a50SLinus Walleij 			"[%s] Failed to register logical and physical capable channels\n",
21958d318a50SLinus Walleij 			__func__);
21968d318a50SLinus Walleij 		goto failure3;
21978d318a50SLinus Walleij 	}
21988d318a50SLinus Walleij 	return 0;
21998d318a50SLinus Walleij failure3:
22008d318a50SLinus Walleij 	dma_async_device_unregister(&base->dma_memcpy);
22018d318a50SLinus Walleij failure2:
22028d318a50SLinus Walleij 	dma_async_device_unregister(&base->dma_slave);
22038d318a50SLinus Walleij failure1:
22048d318a50SLinus Walleij 	return err;
22058d318a50SLinus Walleij }
22068d318a50SLinus Walleij 
22078d318a50SLinus Walleij /* Initialization functions. */
22088d318a50SLinus Walleij 
22098d318a50SLinus Walleij static int __init d40_phy_res_init(struct d40_base *base)
22108d318a50SLinus Walleij {
22118d318a50SLinus Walleij 	int i;
22128d318a50SLinus Walleij 	int num_phy_chans_avail = 0;
22138d318a50SLinus Walleij 	u32 val[2];
22148d318a50SLinus Walleij 	int odd_even_bit = -2;
22158d318a50SLinus Walleij 
22168d318a50SLinus Walleij 	val[0] = readl(base->virtbase + D40_DREG_PRSME);
22178d318a50SLinus Walleij 	val[1] = readl(base->virtbase + D40_DREG_PRSMO);
22188d318a50SLinus Walleij 
22198d318a50SLinus Walleij 	for (i = 0; i < base->num_phy_chans; i++) {
22208d318a50SLinus Walleij 		base->phy_res[i].num = i;
22218d318a50SLinus Walleij 		odd_even_bit += 2 * ((i % 2) == 0);
22228d318a50SLinus Walleij 		if (((val[i % 2] >> odd_even_bit) & 3) == 1) {
22238d318a50SLinus Walleij 			/* Mark security only channels as occupied */
22248d318a50SLinus Walleij 			base->phy_res[i].allocated_src = D40_ALLOC_PHY;
22258d318a50SLinus Walleij 			base->phy_res[i].allocated_dst = D40_ALLOC_PHY;
22268d318a50SLinus Walleij 		} else {
22278d318a50SLinus Walleij 			base->phy_res[i].allocated_src = D40_ALLOC_FREE;
22288d318a50SLinus Walleij 			base->phy_res[i].allocated_dst = D40_ALLOC_FREE;
22298d318a50SLinus Walleij 			num_phy_chans_avail++;
22308d318a50SLinus Walleij 		}
22318d318a50SLinus Walleij 		spin_lock_init(&base->phy_res[i].lock);
22328d318a50SLinus Walleij 	}
22338d318a50SLinus Walleij 	dev_info(base->dev, "%d of %d physical DMA channels available\n",
22348d318a50SLinus Walleij 		 num_phy_chans_avail, base->num_phy_chans);
22358d318a50SLinus Walleij 
22368d318a50SLinus Walleij 	/* Verify settings extended vs standard */
22378d318a50SLinus Walleij 	val[0] = readl(base->virtbase + D40_DREG_PRTYP);
22388d318a50SLinus Walleij 
22398d318a50SLinus Walleij 	for (i = 0; i < base->num_phy_chans; i++) {
22408d318a50SLinus Walleij 
22418d318a50SLinus Walleij 		if (base->phy_res[i].allocated_src == D40_ALLOC_FREE &&
22428d318a50SLinus Walleij 		    (val[0] & 0x3) != 1)
22438d318a50SLinus Walleij 			dev_info(base->dev,
22448d318a50SLinus Walleij 				 "[%s] INFO: channel %d is misconfigured (%d)\n",
22458d318a50SLinus Walleij 				 __func__, i, val[0] & 0x3);
22468d318a50SLinus Walleij 
22478d318a50SLinus Walleij 		val[0] = val[0] >> 2;
22488d318a50SLinus Walleij 	}
22498d318a50SLinus Walleij 
22508d318a50SLinus Walleij 	return num_phy_chans_avail;
22518d318a50SLinus Walleij }
22528d318a50SLinus Walleij 
22538d318a50SLinus Walleij static struct d40_base * __init d40_hw_detect_init(struct platform_device *pdev)
22548d318a50SLinus Walleij {
22558d318a50SLinus Walleij 	static const struct d40_reg_val dma_id_regs[] = {
22568d318a50SLinus Walleij 		/* Peripheral Id */
22578d318a50SLinus Walleij 		{ .reg = D40_DREG_PERIPHID0, .val = 0x0040},
22588d318a50SLinus Walleij 		{ .reg = D40_DREG_PERIPHID1, .val = 0x0000},
22598d318a50SLinus Walleij 		/*
22608d318a50SLinus Walleij 		 * D40_DREG_PERIPHID2 Depends on HW revision:
22618d318a50SLinus Walleij 		 *  MOP500/HREF ED has 0x0008,
22628d318a50SLinus Walleij 		 *  ? has 0x0018,
22638d318a50SLinus Walleij 		 *  HREF V1 has 0x0028
22648d318a50SLinus Walleij 		 */
22658d318a50SLinus Walleij 		{ .reg = D40_DREG_PERIPHID3, .val = 0x0000},
22668d318a50SLinus Walleij 
22678d318a50SLinus Walleij 		/* PCell Id */
22688d318a50SLinus Walleij 		{ .reg = D40_DREG_CELLID0, .val = 0x000d},
22698d318a50SLinus Walleij 		{ .reg = D40_DREG_CELLID1, .val = 0x00f0},
22708d318a50SLinus Walleij 		{ .reg = D40_DREG_CELLID2, .val = 0x0005},
22718d318a50SLinus Walleij 		{ .reg = D40_DREG_CELLID3, .val = 0x00b1}
22728d318a50SLinus Walleij 	};
22738d318a50SLinus Walleij 	struct stedma40_platform_data *plat_data;
22748d318a50SLinus Walleij 	struct clk *clk = NULL;
22758d318a50SLinus Walleij 	void __iomem *virtbase = NULL;
22768d318a50SLinus Walleij 	struct resource *res = NULL;
22778d318a50SLinus Walleij 	struct d40_base *base = NULL;
22788d318a50SLinus Walleij 	int num_log_chans = 0;
22798d318a50SLinus Walleij 	int num_phy_chans;
22808d318a50SLinus Walleij 	int i;
22818d318a50SLinus Walleij 
22828d318a50SLinus Walleij 	clk = clk_get(&pdev->dev, NULL);
22838d318a50SLinus Walleij 
22848d318a50SLinus Walleij 	if (IS_ERR(clk)) {
22858d318a50SLinus Walleij 		dev_err(&pdev->dev, "[%s] No matching clock found\n",
22868d318a50SLinus Walleij 			__func__);
22878d318a50SLinus Walleij 		goto failure;
22888d318a50SLinus Walleij 	}
22898d318a50SLinus Walleij 
22908d318a50SLinus Walleij 	clk_enable(clk);
22918d318a50SLinus Walleij 
22928d318a50SLinus Walleij 	/* Get IO for DMAC base address */
22938d318a50SLinus Walleij 	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "base");
22948d318a50SLinus Walleij 	if (!res)
22958d318a50SLinus Walleij 		goto failure;
22968d318a50SLinus Walleij 
22978d318a50SLinus Walleij 	if (request_mem_region(res->start, resource_size(res),
22988d318a50SLinus Walleij 			       D40_NAME " I/O base") == NULL)
22998d318a50SLinus Walleij 		goto failure;
23008d318a50SLinus Walleij 
23018d318a50SLinus Walleij 	virtbase = ioremap(res->start, resource_size(res));
23028d318a50SLinus Walleij 	if (!virtbase)
23038d318a50SLinus Walleij 		goto failure;
23048d318a50SLinus Walleij 
23058d318a50SLinus Walleij 	/* HW version check */
23068d318a50SLinus Walleij 	for (i = 0; i < ARRAY_SIZE(dma_id_regs); i++) {
23078d318a50SLinus Walleij 		if (dma_id_regs[i].val !=
23088d318a50SLinus Walleij 		    readl(virtbase + dma_id_regs[i].reg)) {
23098d318a50SLinus Walleij 			dev_err(&pdev->dev,
23108d318a50SLinus Walleij 				"[%s] Unknown hardware! Expected 0x%x at 0x%x but got 0x%x\n",
23118d318a50SLinus Walleij 				__func__,
23128d318a50SLinus Walleij 				dma_id_regs[i].val,
23138d318a50SLinus Walleij 				dma_id_regs[i].reg,
23148d318a50SLinus Walleij 				readl(virtbase + dma_id_regs[i].reg));
23158d318a50SLinus Walleij 			goto failure;
23168d318a50SLinus Walleij 		}
23178d318a50SLinus Walleij 	}
23188d318a50SLinus Walleij 
23198d318a50SLinus Walleij 	i = readl(virtbase + D40_DREG_PERIPHID2);
23208d318a50SLinus Walleij 
23218d318a50SLinus Walleij 	if ((i & 0xf) != D40_PERIPHID2_DESIGNER) {
23228d318a50SLinus Walleij 		dev_err(&pdev->dev,
23238d318a50SLinus Walleij 			"[%s] Unknown designer! Got %x wanted %x\n",
23248d318a50SLinus Walleij 			__func__, i & 0xf, D40_PERIPHID2_DESIGNER);
23258d318a50SLinus Walleij 		goto failure;
23268d318a50SLinus Walleij 	}
23278d318a50SLinus Walleij 
23288d318a50SLinus Walleij 	/* The number of physical channels on this HW */
23298d318a50SLinus Walleij 	num_phy_chans = 4 * (readl(virtbase + D40_DREG_ICFG) & 0x7) + 4;
23308d318a50SLinus Walleij 
23318d318a50SLinus Walleij 	dev_info(&pdev->dev, "hardware revision: %d @ 0x%x\n",
23328d318a50SLinus Walleij 		 (i >> 4) & 0xf, res->start);
23338d318a50SLinus Walleij 
23348d318a50SLinus Walleij 	plat_data = pdev->dev.platform_data;
23358d318a50SLinus Walleij 
23368d318a50SLinus Walleij 	/* Count the number of logical channels in use */
23378d318a50SLinus Walleij 	for (i = 0; i < plat_data->dev_len; i++)
23388d318a50SLinus Walleij 		if (plat_data->dev_rx[i] != 0)
23398d318a50SLinus Walleij 			num_log_chans++;
23408d318a50SLinus Walleij 
23418d318a50SLinus Walleij 	for (i = 0; i < plat_data->dev_len; i++)
23428d318a50SLinus Walleij 		if (plat_data->dev_tx[i] != 0)
23438d318a50SLinus Walleij 			num_log_chans++;
23448d318a50SLinus Walleij 
23458d318a50SLinus Walleij 	base = kzalloc(ALIGN(sizeof(struct d40_base), 4) +
23468d318a50SLinus Walleij 		       (num_phy_chans + num_log_chans + plat_data->memcpy_len) *
23478d318a50SLinus Walleij 		       sizeof(struct d40_chan), GFP_KERNEL);
23488d318a50SLinus Walleij 
23498d318a50SLinus Walleij 	if (base == NULL) {
23508d318a50SLinus Walleij 		dev_err(&pdev->dev, "[%s] Out of memory\n", __func__);
23518d318a50SLinus Walleij 		goto failure;
23528d318a50SLinus Walleij 	}
23538d318a50SLinus Walleij 
23548d318a50SLinus Walleij 	base->clk = clk;
23558d318a50SLinus Walleij 	base->num_phy_chans = num_phy_chans;
23568d318a50SLinus Walleij 	base->num_log_chans = num_log_chans;
23578d318a50SLinus Walleij 	base->phy_start = res->start;
23588d318a50SLinus Walleij 	base->phy_size = resource_size(res);
23598d318a50SLinus Walleij 	base->virtbase = virtbase;
23608d318a50SLinus Walleij 	base->plat_data = plat_data;
23618d318a50SLinus Walleij 	base->dev = &pdev->dev;
23628d318a50SLinus Walleij 	base->phy_chans = ((void *)base) + ALIGN(sizeof(struct d40_base), 4);
23638d318a50SLinus Walleij 	base->log_chans = &base->phy_chans[num_phy_chans];
23648d318a50SLinus Walleij 
23658d318a50SLinus Walleij 	base->phy_res = kzalloc(num_phy_chans * sizeof(struct d40_phy_res),
23668d318a50SLinus Walleij 				GFP_KERNEL);
23678d318a50SLinus Walleij 	if (!base->phy_res)
23688d318a50SLinus Walleij 		goto failure;
23698d318a50SLinus Walleij 
23708d318a50SLinus Walleij 	base->lookup_phy_chans = kzalloc(num_phy_chans *
23718d318a50SLinus Walleij 					 sizeof(struct d40_chan *),
23728d318a50SLinus Walleij 					 GFP_KERNEL);
23738d318a50SLinus Walleij 	if (!base->lookup_phy_chans)
23748d318a50SLinus Walleij 		goto failure;
23758d318a50SLinus Walleij 
23768d318a50SLinus Walleij 	if (num_log_chans + plat_data->memcpy_len) {
23778d318a50SLinus Walleij 		/*
23788d318a50SLinus Walleij 		 * The max number of logical channels are event lines for all
23798d318a50SLinus Walleij 		 * src devices and dst devices
23808d318a50SLinus Walleij 		 */
23818d318a50SLinus Walleij 		base->lookup_log_chans = kzalloc(plat_data->dev_len * 2 *
23828d318a50SLinus Walleij 						 sizeof(struct d40_chan *),
23838d318a50SLinus Walleij 						 GFP_KERNEL);
23848d318a50SLinus Walleij 		if (!base->lookup_log_chans)
23858d318a50SLinus Walleij 			goto failure;
23868d318a50SLinus Walleij 	}
23878d318a50SLinus Walleij 	base->lcla_pool.alloc_map = kzalloc(num_phy_chans * sizeof(u32),
23888d318a50SLinus Walleij 					    GFP_KERNEL);
23898d318a50SLinus Walleij 	if (!base->lcla_pool.alloc_map)
23908d318a50SLinus Walleij 		goto failure;
23918d318a50SLinus Walleij 
2392c675b1b4SJonas Aaberg 	base->desc_slab = kmem_cache_create(D40_NAME, sizeof(struct d40_desc),
2393c675b1b4SJonas Aaberg 					    0, SLAB_HWCACHE_ALIGN,
2394c675b1b4SJonas Aaberg 					    NULL);
2395c675b1b4SJonas Aaberg 	if (base->desc_slab == NULL)
2396c675b1b4SJonas Aaberg 		goto failure;
2397c675b1b4SJonas Aaberg 
23988d318a50SLinus Walleij 	return base;
23998d318a50SLinus Walleij 
24008d318a50SLinus Walleij failure:
24018d318a50SLinus Walleij 	if (clk) {
24028d318a50SLinus Walleij 		clk_disable(clk);
24038d318a50SLinus Walleij 		clk_put(clk);
24048d318a50SLinus Walleij 	}
24058d318a50SLinus Walleij 	if (virtbase)
24068d318a50SLinus Walleij 		iounmap(virtbase);
24078d318a50SLinus Walleij 	if (res)
24088d318a50SLinus Walleij 		release_mem_region(res->start,
24098d318a50SLinus Walleij 				   resource_size(res));
24108d318a50SLinus Walleij 	if (virtbase)
24118d318a50SLinus Walleij 		iounmap(virtbase);
24128d318a50SLinus Walleij 
24138d318a50SLinus Walleij 	if (base) {
24148d318a50SLinus Walleij 		kfree(base->lcla_pool.alloc_map);
24158d318a50SLinus Walleij 		kfree(base->lookup_log_chans);
24168d318a50SLinus Walleij 		kfree(base->lookup_phy_chans);
24178d318a50SLinus Walleij 		kfree(base->phy_res);
24188d318a50SLinus Walleij 		kfree(base);
24198d318a50SLinus Walleij 	}
24208d318a50SLinus Walleij 
24218d318a50SLinus Walleij 	return NULL;
24228d318a50SLinus Walleij }
24238d318a50SLinus Walleij 
/*
 * d40_hw_init - program the controller's power-on defaults.
 *
 * Clocks up the whole DMA block, unmasks and clears the interrupts of
 * all logical channels, then walks the physical channels to build the
 * mode/activity/interrupt register contents.  Channels reserved for
 * physical use (D40_ALLOC_PHY) keep their 0x3 marking in the ACTIVE
 * registers and get no interrupt bit.
 */
static void __init d40_hw_init(struct d40_base *base)
{

	static const struct d40_reg_val dma_init_reg[] = {
		/* Clock every part of the DMA block from start */
		{ .reg = D40_DREG_GCC,    .val = 0x0000ff01},

		/* Interrupts on all logical channels */
		{ .reg = D40_DREG_LCMIS0, .val = 0xFFFFFFFF},
		{ .reg = D40_DREG_LCMIS1, .val = 0xFFFFFFFF},
		{ .reg = D40_DREG_LCMIS2, .val = 0xFFFFFFFF},
		{ .reg = D40_DREG_LCMIS3, .val = 0xFFFFFFFF},
		{ .reg = D40_DREG_LCICR0, .val = 0xFFFFFFFF},
		{ .reg = D40_DREG_LCICR1, .val = 0xFFFFFFFF},
		{ .reg = D40_DREG_LCICR2, .val = 0xFFFFFFFF},
		{ .reg = D40_DREG_LCICR3, .val = 0xFFFFFFFF},
		{ .reg = D40_DREG_LCTIS0, .val = 0xFFFFFFFF},
		{ .reg = D40_DREG_LCTIS1, .val = 0xFFFFFFFF},
		{ .reg = D40_DREG_LCTIS2, .val = 0xFFFFFFFF},
		{ .reg = D40_DREG_LCTIS3, .val = 0xFFFFFFFF}
	};
	int i;
	/* Two bits per channel; index 0 collects even i, index 1 odd i */
	u32 prmseo[2] = {0, 0};
	u32 activeo[2] = {0xFFFFFFFF, 0xFFFFFFFF};
	u32 pcmis = 0;	/* one bit per non-reserved channel (IRQ unmask) */
	u32 pcicr = 0;	/* one bit per non-reserved channel (IRQ clear) */

	for (i = 0; i < ARRAY_SIZE(dma_init_reg); i++)
		writel(dma_init_reg[i].val,
		       base->virtbase + dma_init_reg[i].reg);

	/* Configure all our dma channels to default settings */
	for (i = 0; i < base->num_phy_chans; i++) {

		activeo[i % 2] = activeo[i % 2] << 2;

		/* Channels are walked from the highest number downwards */
		if (base->phy_res[base->num_phy_chans - i - 1].allocated_src
		    == D40_ALLOC_PHY) {
			/* Reserved channel: keep 0x3 in ACTIVE, skip IRQs */
			activeo[i % 2] |= 3;
			continue;
		}

		/* Enable interrupt # */
		pcmis = (pcmis << 1) | 1;

		/* Clear interrupt # */
		pcicr = (pcicr << 1) | 1;

		/* Set channel to physical mode */
		prmseo[i % 2] = prmseo[i % 2] << 2;
		prmseo[i % 2] |= 1;

	}

	writel(prmseo[1], base->virtbase + D40_DREG_PRMSE);
	writel(prmseo[0], base->virtbase + D40_DREG_PRMSO);
	writel(activeo[1], base->virtbase + D40_DREG_ACTIVE);
	writel(activeo[0], base->virtbase + D40_DREG_ACTIVO);

	/* Write which interrupt to enable */
	writel(pcmis, base->virtbase + D40_DREG_PCMIS);

	/* Write which interrupt to clear */
	writel(pcicr, base->virtbase + D40_DREG_PCICR);

}
24908d318a50SLinus Walleij 
2491508849adSLinus Walleij static int __init d40_lcla_allocate(struct d40_base *base)
2492508849adSLinus Walleij {
2493508849adSLinus Walleij 	unsigned long *page_list;
2494508849adSLinus Walleij 	int i, j;
2495508849adSLinus Walleij 	int ret = 0;
2496508849adSLinus Walleij 
2497508849adSLinus Walleij 	/*
2498508849adSLinus Walleij 	 * This is somewhat ugly. We need 8192 bytes that are 18 bit aligned,
2499508849adSLinus Walleij 	 * To full fill this hardware requirement without wasting 256 kb
2500508849adSLinus Walleij 	 * we allocate pages until we get an aligned one.
2501508849adSLinus Walleij 	 */
2502508849adSLinus Walleij 	page_list = kmalloc(sizeof(unsigned long) * MAX_LCLA_ALLOC_ATTEMPTS,
2503508849adSLinus Walleij 			    GFP_KERNEL);
2504508849adSLinus Walleij 
2505508849adSLinus Walleij 	if (!page_list) {
2506508849adSLinus Walleij 		ret = -ENOMEM;
2507508849adSLinus Walleij 		goto failure;
2508508849adSLinus Walleij 	}
2509508849adSLinus Walleij 
2510508849adSLinus Walleij 	/* Calculating how many pages that are required */
2511508849adSLinus Walleij 	base->lcla_pool.pages = SZ_1K * base->num_phy_chans / PAGE_SIZE;
2512508849adSLinus Walleij 
2513508849adSLinus Walleij 	for (i = 0; i < MAX_LCLA_ALLOC_ATTEMPTS; i++) {
2514508849adSLinus Walleij 		page_list[i] = __get_free_pages(GFP_KERNEL,
2515508849adSLinus Walleij 						base->lcla_pool.pages);
2516508849adSLinus Walleij 		if (!page_list[i]) {
2517508849adSLinus Walleij 
2518508849adSLinus Walleij 			dev_err(base->dev,
2519508849adSLinus Walleij 				"[%s] Failed to allocate %d pages.\n",
2520508849adSLinus Walleij 				__func__, base->lcla_pool.pages);
2521508849adSLinus Walleij 
2522508849adSLinus Walleij 			for (j = 0; j < i; j++)
2523508849adSLinus Walleij 				free_pages(page_list[j], base->lcla_pool.pages);
2524508849adSLinus Walleij 			goto failure;
2525508849adSLinus Walleij 		}
2526508849adSLinus Walleij 
2527508849adSLinus Walleij 		if ((virt_to_phys((void *)page_list[i]) &
2528508849adSLinus Walleij 		     (LCLA_ALIGNMENT - 1)) == 0)
2529508849adSLinus Walleij 			break;
2530508849adSLinus Walleij 	}
2531508849adSLinus Walleij 
2532508849adSLinus Walleij 	for (j = 0; j < i; j++)
2533508849adSLinus Walleij 		free_pages(page_list[j], base->lcla_pool.pages);
2534508849adSLinus Walleij 
2535508849adSLinus Walleij 	if (i < MAX_LCLA_ALLOC_ATTEMPTS) {
2536508849adSLinus Walleij 		base->lcla_pool.base = (void *)page_list[i];
2537508849adSLinus Walleij 	} else {
2538508849adSLinus Walleij 		/* After many attempts, no succees with finding the correct
2539508849adSLinus Walleij 		 * alignment try with allocating a big buffer */
2540508849adSLinus Walleij 		dev_warn(base->dev,
2541508849adSLinus Walleij 			 "[%s] Failed to get %d pages @ 18 bit align.\n",
2542508849adSLinus Walleij 			 __func__, base->lcla_pool.pages);
2543508849adSLinus Walleij 		base->lcla_pool.base_unaligned = kmalloc(SZ_1K *
2544508849adSLinus Walleij 							 base->num_phy_chans +
2545508849adSLinus Walleij 							 LCLA_ALIGNMENT,
2546508849adSLinus Walleij 							 GFP_KERNEL);
2547508849adSLinus Walleij 		if (!base->lcla_pool.base_unaligned) {
2548508849adSLinus Walleij 			ret = -ENOMEM;
2549508849adSLinus Walleij 			goto failure;
2550508849adSLinus Walleij 		}
2551508849adSLinus Walleij 
2552508849adSLinus Walleij 		base->lcla_pool.base = PTR_ALIGN(base->lcla_pool.base_unaligned,
2553508849adSLinus Walleij 						 LCLA_ALIGNMENT);
2554508849adSLinus Walleij 	}
2555508849adSLinus Walleij 
2556508849adSLinus Walleij 	writel(virt_to_phys(base->lcla_pool.base),
2557508849adSLinus Walleij 	       base->virtbase + D40_DREG_LCLA);
2558508849adSLinus Walleij failure:
2559508849adSLinus Walleij 	kfree(page_list);
2560508849adSLinus Walleij 	return ret;
2561508849adSLinus Walleij }
2562508849adSLinus Walleij 
25638d318a50SLinus Walleij static int __init d40_probe(struct platform_device *pdev)
25648d318a50SLinus Walleij {
25658d318a50SLinus Walleij 	int err;
25668d318a50SLinus Walleij 	int ret = -ENOENT;
25678d318a50SLinus Walleij 	struct d40_base *base;
25688d318a50SLinus Walleij 	struct resource *res = NULL;
25698d318a50SLinus Walleij 	int num_reserved_chans;
25708d318a50SLinus Walleij 	u32 val;
25718d318a50SLinus Walleij 
25728d318a50SLinus Walleij 	base = d40_hw_detect_init(pdev);
25738d318a50SLinus Walleij 
25748d318a50SLinus Walleij 	if (!base)
25758d318a50SLinus Walleij 		goto failure;
25768d318a50SLinus Walleij 
25778d318a50SLinus Walleij 	num_reserved_chans = d40_phy_res_init(base);
25788d318a50SLinus Walleij 
25798d318a50SLinus Walleij 	platform_set_drvdata(pdev, base);
25808d318a50SLinus Walleij 
25818d318a50SLinus Walleij 	spin_lock_init(&base->interrupt_lock);
25828d318a50SLinus Walleij 	spin_lock_init(&base->execmd_lock);
25838d318a50SLinus Walleij 
25848d318a50SLinus Walleij 	/* Get IO for logical channel parameter address */
25858d318a50SLinus Walleij 	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "lcpa");
25868d318a50SLinus Walleij 	if (!res) {
25878d318a50SLinus Walleij 		ret = -ENOENT;
25888d318a50SLinus Walleij 		dev_err(&pdev->dev,
25898d318a50SLinus Walleij 			"[%s] No \"lcpa\" memory resource\n",
25908d318a50SLinus Walleij 			__func__);
25918d318a50SLinus Walleij 		goto failure;
25928d318a50SLinus Walleij 	}
25938d318a50SLinus Walleij 	base->lcpa_size = resource_size(res);
25948d318a50SLinus Walleij 	base->phy_lcpa = res->start;
25958d318a50SLinus Walleij 
25968d318a50SLinus Walleij 	if (request_mem_region(res->start, resource_size(res),
25978d318a50SLinus Walleij 			       D40_NAME " I/O lcpa") == NULL) {
25988d318a50SLinus Walleij 		ret = -EBUSY;
25998d318a50SLinus Walleij 		dev_err(&pdev->dev,
26008d318a50SLinus Walleij 			"[%s] Failed to request LCPA region 0x%x-0x%x\n",
26018d318a50SLinus Walleij 			__func__, res->start, res->end);
26028d318a50SLinus Walleij 		goto failure;
26038d318a50SLinus Walleij 	}
26048d318a50SLinus Walleij 
26058d318a50SLinus Walleij 	/* We make use of ESRAM memory for this. */
26068d318a50SLinus Walleij 	val = readl(base->virtbase + D40_DREG_LCPA);
26078d318a50SLinus Walleij 	if (res->start != val && val != 0) {
26088d318a50SLinus Walleij 		dev_warn(&pdev->dev,
26098d318a50SLinus Walleij 			 "[%s] Mismatch LCPA dma 0x%x, def 0x%x\n",
26108d318a50SLinus Walleij 			 __func__, val, res->start);
26118d318a50SLinus Walleij 	} else
26128d318a50SLinus Walleij 		writel(res->start, base->virtbase + D40_DREG_LCPA);
26138d318a50SLinus Walleij 
26148d318a50SLinus Walleij 	base->lcpa_base = ioremap(res->start, resource_size(res));
26158d318a50SLinus Walleij 	if (!base->lcpa_base) {
26168d318a50SLinus Walleij 		ret = -ENOMEM;
26178d318a50SLinus Walleij 		dev_err(&pdev->dev,
26188d318a50SLinus Walleij 			"[%s] Failed to ioremap LCPA region\n",
26198d318a50SLinus Walleij 			__func__);
26208d318a50SLinus Walleij 		goto failure;
26218d318a50SLinus Walleij 	}
2622508849adSLinus Walleij 
2623508849adSLinus Walleij 	ret = d40_lcla_allocate(base);
2624508849adSLinus Walleij 	if (ret) {
2625508849adSLinus Walleij 		dev_err(&pdev->dev, "[%s] Failed to allocate LCLA area\n",
26268d318a50SLinus Walleij 			__func__);
26278d318a50SLinus Walleij 		goto failure;
26288d318a50SLinus Walleij 	}
26298d318a50SLinus Walleij 
26308d318a50SLinus Walleij 	spin_lock_init(&base->lcla_pool.lock);
26318d318a50SLinus Walleij 
26328d318a50SLinus Walleij 	base->lcla_pool.num_blocks = base->num_phy_chans;
26338d318a50SLinus Walleij 
26348d318a50SLinus Walleij 	base->irq = platform_get_irq(pdev, 0);
26358d318a50SLinus Walleij 
26368d318a50SLinus Walleij 	ret = request_irq(base->irq, d40_handle_interrupt, 0, D40_NAME, base);
26378d318a50SLinus Walleij 
26388d318a50SLinus Walleij 	if (ret) {
26398d318a50SLinus Walleij 		dev_err(&pdev->dev, "[%s] No IRQ defined\n", __func__);
26408d318a50SLinus Walleij 		goto failure;
26418d318a50SLinus Walleij 	}
26428d318a50SLinus Walleij 
26438d318a50SLinus Walleij 	err = d40_dmaengine_init(base, num_reserved_chans);
26448d318a50SLinus Walleij 	if (err)
26458d318a50SLinus Walleij 		goto failure;
26468d318a50SLinus Walleij 
26478d318a50SLinus Walleij 	d40_hw_init(base);
26488d318a50SLinus Walleij 
26498d318a50SLinus Walleij 	dev_info(base->dev, "initialized\n");
26508d318a50SLinus Walleij 	return 0;
26518d318a50SLinus Walleij 
26528d318a50SLinus Walleij failure:
26538d318a50SLinus Walleij 	if (base) {
2654c675b1b4SJonas Aaberg 		if (base->desc_slab)
2655c675b1b4SJonas Aaberg 			kmem_cache_destroy(base->desc_slab);
26568d318a50SLinus Walleij 		if (base->virtbase)
26578d318a50SLinus Walleij 			iounmap(base->virtbase);
2658508849adSLinus Walleij 		if (!base->lcla_pool.base_unaligned && base->lcla_pool.base)
2659508849adSLinus Walleij 			free_pages((unsigned long)base->lcla_pool.base,
2660508849adSLinus Walleij 				   base->lcla_pool.pages);
2661508849adSLinus Walleij 		if (base->lcla_pool.base_unaligned)
2662508849adSLinus Walleij 			kfree(base->lcla_pool.base_unaligned);
26638d318a50SLinus Walleij 		if (base->phy_lcpa)
26648d318a50SLinus Walleij 			release_mem_region(base->phy_lcpa,
26658d318a50SLinus Walleij 					   base->lcpa_size);
26668d318a50SLinus Walleij 		if (base->phy_start)
26678d318a50SLinus Walleij 			release_mem_region(base->phy_start,
26688d318a50SLinus Walleij 					   base->phy_size);
26698d318a50SLinus Walleij 		if (base->clk) {
26708d318a50SLinus Walleij 			clk_disable(base->clk);
26718d318a50SLinus Walleij 			clk_put(base->clk);
26728d318a50SLinus Walleij 		}
26738d318a50SLinus Walleij 
26748d318a50SLinus Walleij 		kfree(base->lcla_pool.alloc_map);
26758d318a50SLinus Walleij 		kfree(base->lookup_log_chans);
26768d318a50SLinus Walleij 		kfree(base->lookup_phy_chans);
26778d318a50SLinus Walleij 		kfree(base->phy_res);
26788d318a50SLinus Walleij 		kfree(base);
26798d318a50SLinus Walleij 	}
26808d318a50SLinus Walleij 
26818d318a50SLinus Walleij 	dev_err(&pdev->dev, "[%s] probe failed\n", __func__);
26828d318a50SLinus Walleij 	return ret;
26838d318a50SLinus Walleij }
26848d318a50SLinus Walleij 
/*
 * No .probe member here: the driver is registered through
 * platform_driver_probe() in stedma40_init() below, which passes
 * d40_probe directly (d40_probe is __init and not kept around).
 */
static struct platform_driver d40_driver = {
	.driver = {
		.owner = THIS_MODULE,
		.name  = D40_NAME,
	},
};
26918d318a50SLinus Walleij 
/* Register the driver, binding d40_probe to any matching device. */
int __init stedma40_init(void)
{
	return platform_driver_probe(&d40_driver, d40_probe);
}
/* Registered at arch_initcall time, i.e. early in the boot sequence */
arch_initcall(stedma40_init);
2697