xref: /linux/drivers/dma/ste_dma40.c (revision 38bdbf020ad7ae1bca564a7db238cdf8b2f462a8)
18d318a50SLinus Walleij /*
2767a9675SJonas Aaberg  * Copyright (C) ST-Ericsson SA 2007-2010
3661385f9SPer Forlin  * Author: Per Forlin <per.forlin@stericsson.com> for ST-Ericsson
4767a9675SJonas Aaberg  * Author: Jonas Aaberg <jonas.aberg@stericsson.com> for ST-Ericsson
58d318a50SLinus Walleij  * License terms: GNU General Public License (GPL) version 2
68d318a50SLinus Walleij  */
78d318a50SLinus Walleij 
88d318a50SLinus Walleij #include <linux/kernel.h>
98d318a50SLinus Walleij #include <linux/slab.h>
108d318a50SLinus Walleij #include <linux/dmaengine.h>
118d318a50SLinus Walleij #include <linux/platform_device.h>
128d318a50SLinus Walleij #include <linux/clk.h>
138d318a50SLinus Walleij #include <linux/delay.h>
14698e4732SJonas Aaberg #include <linux/err.h>
158d318a50SLinus Walleij 
168d318a50SLinus Walleij #include <plat/ste_dma40.h>
178d318a50SLinus Walleij 
188d318a50SLinus Walleij #include "ste_dma40_ll.h"
198d318a50SLinus Walleij 
208d318a50SLinus Walleij #define D40_NAME "dma40"
218d318a50SLinus Walleij 
228d318a50SLinus Walleij #define D40_PHY_CHAN -1
238d318a50SLinus Walleij 
248d318a50SLinus Walleij /* For masking out/in 2 bit channel positions */
258d318a50SLinus Walleij #define D40_CHAN_POS(chan)  (2 * (chan / 2))
268d318a50SLinus Walleij #define D40_CHAN_POS_MASK(chan) (0x3 << D40_CHAN_POS(chan))
278d318a50SLinus Walleij 
288d318a50SLinus Walleij /* Maximum iterations taken before giving up suspending a channel */
298d318a50SLinus Walleij #define D40_SUSPEND_MAX_IT 500
308d318a50SLinus Walleij 
31508849adSLinus Walleij /* Hardware requirement on LCLA alignment */
32508849adSLinus Walleij #define LCLA_ALIGNMENT 0x40000
33698e4732SJonas Aaberg 
34698e4732SJonas Aaberg /* Max number of links per event group */
35698e4732SJonas Aaberg #define D40_LCLA_LINK_PER_EVENT_GRP 128
36698e4732SJonas Aaberg #define D40_LCLA_END D40_LCLA_LINK_PER_EVENT_GRP
37698e4732SJonas Aaberg 
/* Attempts before giving up trying to get pages that are aligned */
39508849adSLinus Walleij #define MAX_LCLA_ALLOC_ATTEMPTS 256
40508849adSLinus Walleij 
41508849adSLinus Walleij /* Bit markings for allocation map */
428d318a50SLinus Walleij #define D40_ALLOC_FREE		(1 << 31)
438d318a50SLinus Walleij #define D40_ALLOC_PHY		(1 << 30)
448d318a50SLinus Walleij #define D40_ALLOC_LOG_FREE	0
458d318a50SLinus Walleij 
468d318a50SLinus Walleij /* Hardware designer of the block */
473ae0267fSJonas Aaberg #define D40_HW_DESIGNER 0x8
488d318a50SLinus Walleij 
/**
 * enum d40_command - The different commands and/or statuses.
 *
 * @D40_DMA_STOP: DMA channel command STOP or status STOPPED,
 * @D40_DMA_RUN: The DMA channel is RUNNING or the command RUN.
 * @D40_DMA_SUSPEND_REQ: Request the DMA to SUSPEND as soon as possible.
 * @D40_DMA_SUSPENDED: The DMA channel is SUSPENDED.
 */
enum d40_command {
	D40_DMA_STOP		= 0,
	D40_DMA_RUN		= 1,
	D40_DMA_SUSPEND_REQ	= 2,
	D40_DMA_SUSPENDED	= 3
};
638d318a50SLinus Walleij 
/**
 * struct d40_lli_pool - Structure for keeping LLIs in memory
 *
 * @base: Pointer to memory area when the pre_alloc_lli's are not large
 * enough, i.e. bigger than the most common case, 1 dst and 1 src. NULL if
 * pre_alloc_lli is used.
 * @size: The size in bytes of the memory at base or the size of pre_alloc_lli.
 * @pre_alloc_lli: Pre-allocated area for the most common case of transfers,
 * one buffer to one buffer.
 */
struct d40_lli_pool {
	void	*base;
	int	 size;
	/* Space for dst and src, plus an extra for padding */
	u8	 pre_alloc_lli[3 * sizeof(struct d40_phy_lli)];
};
808d318a50SLinus Walleij 
/**
 * struct d40_desc - A descriptor is one DMA job.
 *
 * @lli_phy: LLI settings for physical channel. Both src and dst
 * point into the lli_pool, to base if lli_len > 1 or to pre_alloc_lli if
 * lli_len equals one.
 * @lli_log: Same as above but for logical channels.
 * @lli_pool: The pool with two entries pre-allocated.
 * @lli_len: Number of llis of current descriptor.
 * @lli_current: Number of transferred llis.
 * @lcla_alloc: Number of LCLA entries allocated.
 * @txd: DMA engine struct. Used, among other things, for communication
 * during a transfer.
 * @node: List entry.
 * @is_in_client_list: true if the client owns this descriptor.
 * @is_hw_linked: true if this job will automatically be continued from
 * the previous one by the hardware (no software intervention needed).
 *
 * This descriptor is used for both logical and physical transfers.
 */
struct d40_desc {
	/* LLI physical */
	struct d40_phy_lli_bidir	 lli_phy;
	/* LLI logical */
	struct d40_log_lli_bidir	 lli_log;

	struct d40_lli_pool		 lli_pool;
	int				 lli_len;
	int				 lli_current;
	int				 lcla_alloc;

	struct dma_async_tx_descriptor	 txd;
	struct list_head		 node;

	bool				 is_in_client_list;
	bool				 is_hw_linked;
};
1188d318a50SLinus Walleij 
/**
 * struct d40_lcla_pool - LCLA pool settings and data.
 *
 * @base: The virtual address of LCLA. 18 bit aligned.
 * @base_unaligned: The original kmalloc pointer, if kmalloc is used.
 * This pointer is only there for clean-up on error.
 * @pages: The number of pages needed for all physical channels.
 * Only used later for clean-up on error.
 * @lock: Lock to protect the content in this struct.
 * @alloc_map: big map over which LCLA entry is owned by which job.
 */
struct d40_lcla_pool {
	void		*base;
	void		*base_unaligned;
	int		 pages;
	spinlock_t	 lock;
	struct d40_desc	**alloc_map;
};
1378d318a50SLinus Walleij 
/**
 * struct d40_phy_res - struct for handling eventlines mapped to physical
 * channels.
 *
 * @lock: A lock protecting this entity.
 * @num: The physical channel number of this entity.
 * @allocated_src: Bit mapped to show which src event lines are mapped to
 * this physical channel. Can also be free or physically allocated.
 * @allocated_dst: Same as for src but is dst.
 * allocated_dst and allocated_src use the D40_ALLOC* defines as well as
 * event line number.
 */
struct d40_phy_res {
	spinlock_t lock;
	int	   num;
	u32	   allocated_src;
	u32	   allocated_dst;
};
1568d318a50SLinus Walleij 
1578d318a50SLinus Walleij struct d40_base;
1588d318a50SLinus Walleij 
/**
 * struct d40_chan - Struct that describes a channel.
 *
 * @lock: A spinlock to protect this struct.
 * @log_num: The logical number, if any of this channel.
 * @completed: Starts with 1, after first interrupt it is set to dma engine's
 * current cookie.
 * @pending_tx: The number of pending transfers. Used between interrupt handler
 * and tasklet.
 * @busy: Set to true when transfer is ongoing on this channel.
 * @phy_chan: Pointer to physical channel which this instance runs on. If this
 * pointer is NULL, then the channel is not allocated.
 * @chan: DMA engine handle.
 * @tasklet: Tasklet that gets scheduled from interrupt context to complete a
 * transfer and call client callback.
 * @client: Client owned descriptor list.
 * @active: Active descriptor.
 * @queue: Queued jobs.
 * @dma_cfg: The client configuration of this dma channel.
 * @configured: whether the dma_cfg configuration is valid
 * @base: Pointer to the device instance struct.
 * @src_def_cfg: Default cfg register setting for src.
 * @dst_def_cfg: Default cfg register setting for dst.
 * @log_def: Default logical channel settings.
 * @lcpa: Pointer to dst and src lcpa settings.
 * @runtime_addr: Device address set by runtime reconfiguration
 * (NOTE(review): presumably 0 means "use platform default" — confirm
 * against the runtime-config handler).
 * @runtime_direction: Transfer direction set by runtime reconfiguration.
 *
 * This struct can either "be" a logical or a physical channel.
 */
struct d40_chan {
	spinlock_t			 lock;
	int				 log_num;
	/* ID of the most recent completed transfer */
	int				 completed;
	int				 pending_tx;
	bool				 busy;
	struct d40_phy_res		*phy_chan;
	struct dma_chan			 chan;
	struct tasklet_struct		 tasklet;
	struct list_head		 client;
	struct list_head		 active;
	struct list_head		 queue;
	struct stedma40_chan_cfg	 dma_cfg;
	bool				 configured;
	struct d40_base			*base;
	/* Default register configurations */
	u32				 src_def_cfg;
	u32				 dst_def_cfg;
	struct d40_def_lcsp		 log_def;
	struct d40_log_lli_full		*lcpa;
	/* Runtime reconfiguration */
	dma_addr_t			runtime_addr;
	enum dma_data_direction		runtime_direction;
};
2138d318a50SLinus Walleij 
/**
 * struct d40_base - The big global struct, one for each probed instance.
 *
 * @interrupt_lock: Lock used to make sure one interrupt is handled at a time.
 * @execmd_lock: Lock for execute command usage since several channels share
 * the same physical register.
 * @dev: The device structure.
 * @virtbase: The virtual base address of the DMA's register.
 * @rev: silicon revision detected.
 * @clk: Pointer to the DMA clock structure.
 * @phy_start: Physical memory start of the DMA registers.
 * @phy_size: Size of the DMA register map.
 * @irq: The IRQ number.
 * @num_phy_chans: The number of physical channels. Read from HW. This
 * is the number of available channels for this driver, not counting "Secure
 * mode" allocated physical channels.
 * @num_log_chans: The number of logical channels. Calculated from
 * num_phy_chans.
 * @dma_both: dma_device channels that can do both memcpy and slave transfers.
 * @dma_slave: dma_device channels that can only do slave transfers.
 * @dma_memcpy: dma_device channels that can only do memcpy transfers.
 * @phy_chans: Room for all possible physical channels in system.
 * @log_chans: Room for all possible logical channels in system.
 * @lookup_log_chans: Used to map interrupt number to logical channel. Points
 * to log_chans entries.
 * @lookup_phy_chans: Used to map interrupt number to physical channel. Points
 * to phy_chans entries.
 * @plat_data: Pointer to provided platform_data which is the driver
 * configuration.
 * @phy_res: Vector containing all physical channels.
 * @lcla_pool: lcla pool settings and data.
 * @lcpa_base: The virtual mapped address of LCPA.
 * @phy_lcpa: The physical address of the LCPA.
 * @lcpa_size: The size of the LCPA area.
 * @desc_slab: cache for descriptors.
 */
struct d40_base {
	spinlock_t			 interrupt_lock;
	spinlock_t			 execmd_lock;
	struct device			 *dev;
	void __iomem			 *virtbase;
	u8				  rev:4;
	struct clk			 *clk;
	phys_addr_t			  phy_start;
	resource_size_t			  phy_size;
	int				  irq;
	int				  num_phy_chans;
	int				  num_log_chans;
	struct dma_device		  dma_both;
	struct dma_device		  dma_slave;
	struct dma_device		  dma_memcpy;
	struct d40_chan			 *phy_chans;
	struct d40_chan			 *log_chans;
	struct d40_chan			**lookup_log_chans;
	struct d40_chan			**lookup_phy_chans;
	struct stedma40_platform_data	 *plat_data;
	/* Physical half channels */
	struct d40_phy_res		 *phy_res;
	struct d40_lcla_pool		  lcla_pool;
	void				 *lcpa_base;
	dma_addr_t			  phy_lcpa;
	resource_size_t			  lcpa_size;
	struct kmem_cache		 *desc_slab;
};
2778d318a50SLinus Walleij 
/**
 * struct d40_interrupt_lookup - lookup table for the interrupt handler
 *
 * @src: Interrupt mask register.
 * @clr: Interrupt clear register.
 * @is_error: true if this is an error interrupt.
 * @offset: start delta in the lookup_log_chans in d40_base. If equal to
 * D40_PHY_CHAN, the lookup_phy_chans shall be used instead.
 */
struct d40_interrupt_lookup {
	u32 src;
	u32 clr;
	bool is_error;
	int offset;
};
2938d318a50SLinus Walleij 
/**
 * struct d40_reg_val - simple register/value lookup pair
 *
 * @reg: The register.
 * @val: The value that belongs to the register in reg.
 */
struct d40_reg_val {
	unsigned int reg;
	unsigned int val;
};
3048d318a50SLinus Walleij 
3058d318a50SLinus Walleij static int d40_pool_lli_alloc(struct d40_desc *d40d,
3068d318a50SLinus Walleij 			      int lli_len, bool is_log)
3078d318a50SLinus Walleij {
3088d318a50SLinus Walleij 	u32 align;
3098d318a50SLinus Walleij 	void *base;
3108d318a50SLinus Walleij 
3118d318a50SLinus Walleij 	if (is_log)
3128d318a50SLinus Walleij 		align = sizeof(struct d40_log_lli);
3138d318a50SLinus Walleij 	else
3148d318a50SLinus Walleij 		align = sizeof(struct d40_phy_lli);
3158d318a50SLinus Walleij 
3168d318a50SLinus Walleij 	if (lli_len == 1) {
3178d318a50SLinus Walleij 		base = d40d->lli_pool.pre_alloc_lli;
3188d318a50SLinus Walleij 		d40d->lli_pool.size = sizeof(d40d->lli_pool.pre_alloc_lli);
3198d318a50SLinus Walleij 		d40d->lli_pool.base = NULL;
3208d318a50SLinus Walleij 	} else {
3218d318a50SLinus Walleij 		d40d->lli_pool.size = ALIGN(lli_len * 2 * align, align);
3228d318a50SLinus Walleij 
3238d318a50SLinus Walleij 		base = kmalloc(d40d->lli_pool.size + align, GFP_NOWAIT);
3248d318a50SLinus Walleij 		d40d->lli_pool.base = base;
3258d318a50SLinus Walleij 
3268d318a50SLinus Walleij 		if (d40d->lli_pool.base == NULL)
3278d318a50SLinus Walleij 			return -ENOMEM;
3288d318a50SLinus Walleij 	}
3298d318a50SLinus Walleij 
3308d318a50SLinus Walleij 	if (is_log) {
3318d318a50SLinus Walleij 		d40d->lli_log.src = PTR_ALIGN((struct d40_log_lli *) base,
3328d318a50SLinus Walleij 					      align);
3338d318a50SLinus Walleij 		d40d->lli_log.dst = PTR_ALIGN(d40d->lli_log.src + lli_len,
3348d318a50SLinus Walleij 					      align);
3358d318a50SLinus Walleij 	} else {
3368d318a50SLinus Walleij 		d40d->lli_phy.src = PTR_ALIGN((struct d40_phy_lli *)base,
3378d318a50SLinus Walleij 					      align);
3388d318a50SLinus Walleij 		d40d->lli_phy.dst = PTR_ALIGN(d40d->lli_phy.src + lli_len,
3398d318a50SLinus Walleij 					      align);
3408d318a50SLinus Walleij 	}
3418d318a50SLinus Walleij 
3428d318a50SLinus Walleij 	return 0;
3438d318a50SLinus Walleij }
3448d318a50SLinus Walleij 
3458d318a50SLinus Walleij static void d40_pool_lli_free(struct d40_desc *d40d)
3468d318a50SLinus Walleij {
3478d318a50SLinus Walleij 	kfree(d40d->lli_pool.base);
3488d318a50SLinus Walleij 	d40d->lli_pool.base = NULL;
3498d318a50SLinus Walleij 	d40d->lli_pool.size = 0;
3508d318a50SLinus Walleij 	d40d->lli_log.src = NULL;
3518d318a50SLinus Walleij 	d40d->lli_log.dst = NULL;
3528d318a50SLinus Walleij 	d40d->lli_phy.src = NULL;
3538d318a50SLinus Walleij 	d40d->lli_phy.dst = NULL;
3548d318a50SLinus Walleij }
3558d318a50SLinus Walleij 
356698e4732SJonas Aaberg static int d40_lcla_alloc_one(struct d40_chan *d40c,
357698e4732SJonas Aaberg 			      struct d40_desc *d40d)
358698e4732SJonas Aaberg {
359698e4732SJonas Aaberg 	unsigned long flags;
360698e4732SJonas Aaberg 	int i;
361698e4732SJonas Aaberg 	int ret = -EINVAL;
362698e4732SJonas Aaberg 	int p;
363698e4732SJonas Aaberg 
364698e4732SJonas Aaberg 	spin_lock_irqsave(&d40c->base->lcla_pool.lock, flags);
365698e4732SJonas Aaberg 
366698e4732SJonas Aaberg 	p = d40c->phy_chan->num * D40_LCLA_LINK_PER_EVENT_GRP;
367698e4732SJonas Aaberg 
368698e4732SJonas Aaberg 	/*
369698e4732SJonas Aaberg 	 * Allocate both src and dst at the same time, therefore the half
370698e4732SJonas Aaberg 	 * start on 1 since 0 can't be used since zero is used as end marker.
371698e4732SJonas Aaberg 	 */
372698e4732SJonas Aaberg 	for (i = 1 ; i < D40_LCLA_LINK_PER_EVENT_GRP / 2; i++) {
373698e4732SJonas Aaberg 		if (!d40c->base->lcla_pool.alloc_map[p + i]) {
374698e4732SJonas Aaberg 			d40c->base->lcla_pool.alloc_map[p + i] = d40d;
375698e4732SJonas Aaberg 			d40d->lcla_alloc++;
376698e4732SJonas Aaberg 			ret = i;
377698e4732SJonas Aaberg 			break;
378698e4732SJonas Aaberg 		}
379698e4732SJonas Aaberg 	}
380698e4732SJonas Aaberg 
381698e4732SJonas Aaberg 	spin_unlock_irqrestore(&d40c->base->lcla_pool.lock, flags);
382698e4732SJonas Aaberg 
383698e4732SJonas Aaberg 	return ret;
384698e4732SJonas Aaberg }
385698e4732SJonas Aaberg 
386698e4732SJonas Aaberg static int d40_lcla_free_all(struct d40_chan *d40c,
387698e4732SJonas Aaberg 			     struct d40_desc *d40d)
388698e4732SJonas Aaberg {
389698e4732SJonas Aaberg 	unsigned long flags;
390698e4732SJonas Aaberg 	int i;
391698e4732SJonas Aaberg 	int ret = -EINVAL;
392698e4732SJonas Aaberg 
393698e4732SJonas Aaberg 	if (d40c->log_num == D40_PHY_CHAN)
394698e4732SJonas Aaberg 		return 0;
395698e4732SJonas Aaberg 
396698e4732SJonas Aaberg 	spin_lock_irqsave(&d40c->base->lcla_pool.lock, flags);
397698e4732SJonas Aaberg 
398698e4732SJonas Aaberg 	for (i = 1 ; i < D40_LCLA_LINK_PER_EVENT_GRP / 2; i++) {
399698e4732SJonas Aaberg 		if (d40c->base->lcla_pool.alloc_map[d40c->phy_chan->num *
400698e4732SJonas Aaberg 						    D40_LCLA_LINK_PER_EVENT_GRP + i] == d40d) {
401698e4732SJonas Aaberg 			d40c->base->lcla_pool.alloc_map[d40c->phy_chan->num *
402698e4732SJonas Aaberg 							D40_LCLA_LINK_PER_EVENT_GRP + i] = NULL;
403698e4732SJonas Aaberg 			d40d->lcla_alloc--;
404698e4732SJonas Aaberg 			if (d40d->lcla_alloc == 0) {
405698e4732SJonas Aaberg 				ret = 0;
406698e4732SJonas Aaberg 				break;
407698e4732SJonas Aaberg 			}
408698e4732SJonas Aaberg 		}
409698e4732SJonas Aaberg 	}
410698e4732SJonas Aaberg 
411698e4732SJonas Aaberg 	spin_unlock_irqrestore(&d40c->base->lcla_pool.lock, flags);
412698e4732SJonas Aaberg 
413698e4732SJonas Aaberg 	return ret;
414698e4732SJonas Aaberg 
415698e4732SJonas Aaberg }
416698e4732SJonas Aaberg 
/* Unlink a descriptor from whichever channel list it currently sits on. */
static void d40_desc_remove(struct d40_desc *d40d)
{
	list_del(&d40d->node);
}
4218d318a50SLinus Walleij 
4228d318a50SLinus Walleij static struct d40_desc *d40_desc_get(struct d40_chan *d40c)
4238d318a50SLinus Walleij {
424a2c15fa4SRabin Vincent 	struct d40_desc *desc = NULL;
425a2c15fa4SRabin Vincent 
426a2c15fa4SRabin Vincent 	if (!list_empty(&d40c->client)) {
4278d318a50SLinus Walleij 		struct d40_desc *d;
4288d318a50SLinus Walleij 		struct d40_desc *_d;
4298d318a50SLinus Walleij 
4308d318a50SLinus Walleij 		list_for_each_entry_safe(d, _d, &d40c->client, node)
4318d318a50SLinus Walleij 			if (async_tx_test_ack(&d->txd)) {
4328d318a50SLinus Walleij 				d40_pool_lli_free(d);
4338d318a50SLinus Walleij 				d40_desc_remove(d);
434a2c15fa4SRabin Vincent 				desc = d;
435a2c15fa4SRabin Vincent 				memset(desc, 0, sizeof(*desc));
436c675b1b4SJonas Aaberg 				break;
4378d318a50SLinus Walleij 			}
4388d318a50SLinus Walleij 	}
439a2c15fa4SRabin Vincent 
440a2c15fa4SRabin Vincent 	if (!desc)
441a2c15fa4SRabin Vincent 		desc = kmem_cache_zalloc(d40c->base->desc_slab, GFP_NOWAIT);
442a2c15fa4SRabin Vincent 
443a2c15fa4SRabin Vincent 	if (desc)
444a2c15fa4SRabin Vincent 		INIT_LIST_HEAD(&desc->node);
445a2c15fa4SRabin Vincent 
446a2c15fa4SRabin Vincent 	return desc;
4478d318a50SLinus Walleij }
4488d318a50SLinus Walleij 
/*
 * Return a descriptor to the slab cache, first releasing any LCLA
 * entries the job still holds.
 */
static void d40_desc_free(struct d40_chan *d40c, struct d40_desc *d40d)
{

	d40_lcla_free_all(d40c, d40d);
	kmem_cache_free(d40c->base->desc_slab, d40d);
}
4558d318a50SLinus Walleij 
/* Append a descriptor to the tail of the channel's active list. */
static void d40_desc_submit(struct d40_chan *d40c, struct d40_desc *desc)
{
	list_add_tail(&desc->node, &d40c->active);
}
4608d318a50SLinus Walleij 
/*
 * Program the descriptor's link lists into hardware.
 *
 * For a physical channel the whole src/dst LLI chain is written to the
 * channel registers in one go. For a logical channel the first src/dst
 * pair goes into LCPA and each following pair is chained through an
 * LCLA entry allocated on the fly; when no LCLA entry is available the
 * chain ends early and lli_current records how far we got, so a later
 * call resumes from there (the loop starts at lli_current).
 */
static void d40_desc_load(struct d40_chan *d40c, struct d40_desc *d40d)
{
	int curr_lcla = -EINVAL, next_lcla;

	if (d40c->log_num == D40_PHY_CHAN) {
		d40_phy_lli_write(d40c->base->virtbase,
				  d40c->phy_chan->num,
				  d40d->lli_phy.dst,
				  d40d->lli_phy.src);
		/* Everything is handed to HW at once for physical chans. */
		d40d->lli_current = d40d->lli_len;
	} else {

		/* Only chain via LCLA if more than one link remains. */
		if ((d40d->lli_len - d40d->lli_current) > 1)
			curr_lcla = d40_lcla_alloc_one(d40c, d40d);

		d40_log_lli_lcpa_write(d40c->lcpa,
				       &d40d->lli_log.dst[d40d->lli_current],
				       &d40d->lli_log.src[d40d->lli_current],
				       curr_lcla);

		d40d->lli_current++;
		for (; d40d->lli_current < d40d->lli_len; d40d->lli_current++) {
			struct d40_log_lli *lcla;

			/* Pre-allocate the next link, -EINVAL terminates. */
			if (d40d->lli_current + 1 < d40d->lli_len)
				next_lcla = d40_lcla_alloc_one(d40c, d40d);
			else
				next_lcla = -EINVAL;

			/*
			 * NOTE(review): address math assumes 1024 bytes of
			 * LCLA space per physical channel and 8-byte src/dst
			 * entries (2 * 8 per link) — confirm against the
			 * DMAC40 LCLA layout.
			 */
			lcla = d40c->base->lcla_pool.base +
				d40c->phy_chan->num * 1024 +
				8 * curr_lcla * 2;

			d40_log_lli_lcla_write(lcla,
					       &d40d->lli_log.dst[d40d->lli_current],
					       &d40d->lli_log.src[d40d->lli_current],
					       next_lcla);

			/* Flush the entry so the DMAC sees it in memory. */
			(void) dma_map_single(d40c->base->dev, lcla,
					      2 * sizeof(struct d40_log_lli),
					      DMA_TO_DEVICE);

			curr_lcla = next_lcla;

			/* Out of LCLA entries: stop after this link. */
			if (curr_lcla == -EINVAL) {
				d40d->lli_current++;
				break;
			}

		}
	}
}
513698e4732SJonas Aaberg 
5148d318a50SLinus Walleij static struct d40_desc *d40_first_active_get(struct d40_chan *d40c)
5158d318a50SLinus Walleij {
5168d318a50SLinus Walleij 	struct d40_desc *d;
5178d318a50SLinus Walleij 
5188d318a50SLinus Walleij 	if (list_empty(&d40c->active))
5198d318a50SLinus Walleij 		return NULL;
5208d318a50SLinus Walleij 
5218d318a50SLinus Walleij 	d = list_first_entry(&d40c->active,
5228d318a50SLinus Walleij 			     struct d40_desc,
5238d318a50SLinus Walleij 			     node);
5248d318a50SLinus Walleij 	return d;
5258d318a50SLinus Walleij }
5268d318a50SLinus Walleij 
/* Append a descriptor to the tail of the channel's pending queue. */
static void d40_desc_queue(struct d40_chan *d40c, struct d40_desc *desc)
{
	list_add_tail(&desc->node, &d40c->queue);
}
5318d318a50SLinus Walleij 
5328d318a50SLinus Walleij static struct d40_desc *d40_first_queued(struct d40_chan *d40c)
5338d318a50SLinus Walleij {
5348d318a50SLinus Walleij 	struct d40_desc *d;
5358d318a50SLinus Walleij 
5368d318a50SLinus Walleij 	if (list_empty(&d40c->queue))
5378d318a50SLinus Walleij 		return NULL;
5388d318a50SLinus Walleij 
5398d318a50SLinus Walleij 	d = list_first_entry(&d40c->queue,
5408d318a50SLinus Walleij 			     struct d40_desc,
5418d318a50SLinus Walleij 			     node);
5428d318a50SLinus Walleij 	return d;
5438d318a50SLinus Walleij }
5448d318a50SLinus Walleij 
545aa182ae2SJonas Aaberg static struct d40_desc *d40_last_queued(struct d40_chan *d40c)
546aa182ae2SJonas Aaberg {
547aa182ae2SJonas Aaberg 	struct d40_desc *d;
548aa182ae2SJonas Aaberg 
549aa182ae2SJonas Aaberg 	if (list_empty(&d40c->queue))
550aa182ae2SJonas Aaberg 		return NULL;
551aa182ae2SJonas Aaberg 	list_for_each_entry(d, &d40c->queue, node)
552aa182ae2SJonas Aaberg 		if (list_is_last(&d->node, &d40c->queue))
553aa182ae2SJonas Aaberg 			break;
554aa182ae2SJonas Aaberg 	return d;
555aa182ae2SJonas Aaberg }
556aa182ae2SJonas Aaberg 
5578d318a50SLinus Walleij /* Support functions for logical channels */
5588d318a50SLinus Walleij 
5598d318a50SLinus Walleij 
/*
 * Issue a command (RUN/STOP/SUSPEND_REQ) to the physical channel backing
 * @d40c. For suspend requests the function polls the status field until
 * the hardware reports STOPPED or SUSPENDED, giving up after
 * D40_SUSPEND_MAX_IT iterations.
 *
 * Returns 0 on success, -EBUSY if the channel failed to suspend in time.
 */
static int d40_channel_execute_command(struct d40_chan *d40c,
				       enum d40_command command)
{
	u32 status;
	int i;
	void __iomem *active_reg;
	int ret = 0;
	unsigned long flags;
	u32 wmask;

	/* The command registers are shared by all channels: serialize. */
	spin_lock_irqsave(&d40c->base->execmd_lock, flags);

	/*
	 * Each register holds 2-bit fields for several channels;
	 * even-numbered channels live in ACTIVE, odd ones in ACTIVO.
	 */
	if (d40c->phy_chan->num % 2 == 0)
		active_reg = d40c->base->virtbase + D40_DREG_ACTIVE;
	else
		active_reg = d40c->base->virtbase + D40_DREG_ACTIVO;

	if (command == D40_DMA_SUSPEND_REQ) {
		status = (readl(active_reg) &
			  D40_CHAN_POS_MASK(d40c->phy_chan->num)) >>
			D40_CHAN_POS(d40c->phy_chan->num);

		/* Already stopped or suspended: nothing to request. */
		if (status == D40_DMA_SUSPENDED || status == D40_DMA_STOP)
			goto done;
	}

	/*
	 * Write the command into this channel's 2-bit slot; all other
	 * channels' bits are written as ones (NOTE(review): assumes the
	 * hardware ignores all-ones in foreign slots — confirm with the
	 * DMAC40 register spec).
	 */
	wmask = 0xffffffff & ~(D40_CHAN_POS_MASK(d40c->phy_chan->num));
	writel(wmask | (command << D40_CHAN_POS(d40c->phy_chan->num)),
	       active_reg);

	if (command == D40_DMA_SUSPEND_REQ) {

		/* Poll until the channel reports stopped/suspended. */
		for (i = 0 ; i < D40_SUSPEND_MAX_IT; i++) {
			status = (readl(active_reg) &
				  D40_CHAN_POS_MASK(d40c->phy_chan->num)) >>
				D40_CHAN_POS(d40c->phy_chan->num);

			cpu_relax();
			/*
			 * Reduce the number of bus accesses while
			 * waiting for the DMA to suspend.
			 */
			udelay(3);

			if (status == D40_DMA_STOP ||
			    status == D40_DMA_SUSPENDED)
				break;
		}

		if (i == D40_SUSPEND_MAX_IT) {
			dev_err(&d40c->chan.dev->device,
				"[%s]: unable to suspend the chl %d (log: %d) status %x\n",
				__func__, d40c->phy_chan->num, d40c->log_num,
				status);
			dump_stack();
			ret = -EBUSY;
		}

	}
done:
	spin_unlock_irqrestore(&d40c->base->execmd_lock, flags);
	return ret;
}
6238d318a50SLinus Walleij 
6248d318a50SLinus Walleij static void d40_term_all(struct d40_chan *d40c)
6258d318a50SLinus Walleij {
6268d318a50SLinus Walleij 	struct d40_desc *d40d;
6278d318a50SLinus Walleij 
6288d318a50SLinus Walleij 	/* Release active descriptors */
6298d318a50SLinus Walleij 	while ((d40d = d40_first_active_get(d40c))) {
6308d318a50SLinus Walleij 		d40_desc_remove(d40d);
6318d318a50SLinus Walleij 		d40_desc_free(d40c, d40d);
6328d318a50SLinus Walleij 	}
6338d318a50SLinus Walleij 
6348d318a50SLinus Walleij 	/* Release queued descriptors waiting for transfer */
6358d318a50SLinus Walleij 	while ((d40d = d40_first_queued(d40c))) {
6368d318a50SLinus Walleij 		d40_desc_remove(d40d);
6378d318a50SLinus Walleij 		d40_desc_free(d40c, d40d);
6388d318a50SLinus Walleij 	}
6398d318a50SLinus Walleij 
6408d318a50SLinus Walleij 
6418d318a50SLinus Walleij 	d40c->pending_tx = 0;
6428d318a50SLinus Walleij 	d40c->busy = false;
6438d318a50SLinus Walleij }
6448d318a50SLinus Walleij 
6458d318a50SLinus Walleij static void d40_config_set_event(struct d40_chan *d40c, bool do_enable)
6468d318a50SLinus Walleij {
6478d318a50SLinus Walleij 	u32 val;
6488d318a50SLinus Walleij 	unsigned long flags;
6498d318a50SLinus Walleij 
6500c32269dSJonas Aaberg 	/* Notice, that disable requires the physical channel to be stopped */
6518d318a50SLinus Walleij 	if (do_enable)
6528d318a50SLinus Walleij 		val = D40_ACTIVATE_EVENTLINE;
6538d318a50SLinus Walleij 	else
6548d318a50SLinus Walleij 		val = D40_DEACTIVATE_EVENTLINE;
6558d318a50SLinus Walleij 
6568d318a50SLinus Walleij 	spin_lock_irqsave(&d40c->phy_chan->lock, flags);
6578d318a50SLinus Walleij 
6588d318a50SLinus Walleij 	/* Enable event line connected to device (or memcpy) */
6598d318a50SLinus Walleij 	if ((d40c->dma_cfg.dir ==  STEDMA40_PERIPH_TO_MEM) ||
6608d318a50SLinus Walleij 	    (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_PERIPH)) {
6618d318a50SLinus Walleij 		u32 event = D40_TYPE_TO_EVENT(d40c->dma_cfg.src_dev_type);
6628d318a50SLinus Walleij 
6638d318a50SLinus Walleij 		writel((val << D40_EVENTLINE_POS(event)) |
6648d318a50SLinus Walleij 		       ~D40_EVENTLINE_MASK(event),
6658d318a50SLinus Walleij 		       d40c->base->virtbase + D40_DREG_PCBASE +
6668d318a50SLinus Walleij 		       d40c->phy_chan->num * D40_DREG_PCDELTA +
6678d318a50SLinus Walleij 		       D40_CHAN_REG_SSLNK);
6688d318a50SLinus Walleij 	}
6698d318a50SLinus Walleij 	if (d40c->dma_cfg.dir !=  STEDMA40_PERIPH_TO_MEM) {
6708d318a50SLinus Walleij 		u32 event = D40_TYPE_TO_EVENT(d40c->dma_cfg.dst_dev_type);
6718d318a50SLinus Walleij 
6728d318a50SLinus Walleij 		writel((val << D40_EVENTLINE_POS(event)) |
6738d318a50SLinus Walleij 		       ~D40_EVENTLINE_MASK(event),
6748d318a50SLinus Walleij 		       d40c->base->virtbase + D40_DREG_PCBASE +
6758d318a50SLinus Walleij 		       d40c->phy_chan->num * D40_DREG_PCDELTA +
6768d318a50SLinus Walleij 		       D40_CHAN_REG_SDLNK);
6778d318a50SLinus Walleij 	}
6788d318a50SLinus Walleij 
6798d318a50SLinus Walleij 	spin_unlock_irqrestore(&d40c->phy_chan->lock, flags);
6808d318a50SLinus Walleij }
6818d318a50SLinus Walleij 
682a5ebca47SJonas Aaberg static u32 d40_chan_has_events(struct d40_chan *d40c)
6838d318a50SLinus Walleij {
684be8cb7dfSJonas Aaberg 	u32 val;
6858d318a50SLinus Walleij 
6868d318a50SLinus Walleij 	val = readl(d40c->base->virtbase + D40_DREG_PCBASE +
6878d318a50SLinus Walleij 		    d40c->phy_chan->num * D40_DREG_PCDELTA +
6888d318a50SLinus Walleij 		    D40_CHAN_REG_SSLNK);
6898d318a50SLinus Walleij 
690be8cb7dfSJonas Aaberg 	val |= readl(d40c->base->virtbase + D40_DREG_PCBASE +
6918d318a50SLinus Walleij 		     d40c->phy_chan->num * D40_DREG_PCDELTA +
6928d318a50SLinus Walleij 		     D40_CHAN_REG_SDLNK);
693a5ebca47SJonas Aaberg 	return val;
6948d318a50SLinus Walleij }
6958d318a50SLinus Walleij 
696b55912c6SJonas Aaberg static void d40_config_write(struct d40_chan *d40c)
6978d318a50SLinus Walleij {
6988d318a50SLinus Walleij 	u32 addr_base;
6998d318a50SLinus Walleij 	u32 var;
7008d318a50SLinus Walleij 
7018d318a50SLinus Walleij 	/* Odd addresses are even addresses + 4 */
7028d318a50SLinus Walleij 	addr_base = (d40c->phy_chan->num % 2) * 4;
7038d318a50SLinus Walleij 	/* Setup channel mode to logical or physical */
7048d318a50SLinus Walleij 	var = ((u32)(d40c->log_num != D40_PHY_CHAN) + 1) <<
7058d318a50SLinus Walleij 		D40_CHAN_POS(d40c->phy_chan->num);
7068d318a50SLinus Walleij 	writel(var, d40c->base->virtbase + D40_DREG_PRMSE + addr_base);
7078d318a50SLinus Walleij 
7088d318a50SLinus Walleij 	/* Setup operational mode option register */
7098d318a50SLinus Walleij 	var = ((d40c->dma_cfg.channel_type >> STEDMA40_INFO_CH_MODE_OPT_POS) &
7108d318a50SLinus Walleij 	       0x3) << D40_CHAN_POS(d40c->phy_chan->num);
7118d318a50SLinus Walleij 
7128d318a50SLinus Walleij 	writel(var, d40c->base->virtbase + D40_DREG_PRMOE + addr_base);
7138d318a50SLinus Walleij 
7148d318a50SLinus Walleij 	if (d40c->log_num != D40_PHY_CHAN) {
7158d318a50SLinus Walleij 		/* Set default config for CFG reg */
7168d318a50SLinus Walleij 		writel(d40c->src_def_cfg,
7178d318a50SLinus Walleij 		       d40c->base->virtbase + D40_DREG_PCBASE +
7188d318a50SLinus Walleij 		       d40c->phy_chan->num * D40_DREG_PCDELTA +
7198d318a50SLinus Walleij 		       D40_CHAN_REG_SSCFG);
7208d318a50SLinus Walleij 		writel(d40c->dst_def_cfg,
7218d318a50SLinus Walleij 		       d40c->base->virtbase + D40_DREG_PCBASE +
7228d318a50SLinus Walleij 		       d40c->phy_chan->num * D40_DREG_PCDELTA +
7238d318a50SLinus Walleij 		       D40_CHAN_REG_SDCFG);
7248d318a50SLinus Walleij 
725b55912c6SJonas Aaberg 		/* Set LIDX for lcla */
726b55912c6SJonas Aaberg 		writel((d40c->phy_chan->num << D40_SREG_ELEM_LOG_LIDX_POS) &
727b55912c6SJonas Aaberg 		       D40_SREG_ELEM_LOG_LIDX_MASK,
728b55912c6SJonas Aaberg 		       d40c->base->virtbase + D40_DREG_PCBASE +
729b55912c6SJonas Aaberg 		       d40c->phy_chan->num * D40_DREG_PCDELTA +
730b55912c6SJonas Aaberg 		       D40_CHAN_REG_SDELT);
731b55912c6SJonas Aaberg 
732b55912c6SJonas Aaberg 		writel((d40c->phy_chan->num << D40_SREG_ELEM_LOG_LIDX_POS) &
733b55912c6SJonas Aaberg 		       D40_SREG_ELEM_LOG_LIDX_MASK,
734b55912c6SJonas Aaberg 		       d40c->base->virtbase + D40_DREG_PCBASE +
735b55912c6SJonas Aaberg 		       d40c->phy_chan->num * D40_DREG_PCDELTA +
736b55912c6SJonas Aaberg 		       D40_CHAN_REG_SSELT);
737b55912c6SJonas Aaberg 
7388d318a50SLinus Walleij 	}
7398d318a50SLinus Walleij }
7408d318a50SLinus Walleij 
741aa182ae2SJonas Aaberg static u32 d40_residue(struct d40_chan *d40c)
742aa182ae2SJonas Aaberg {
743aa182ae2SJonas Aaberg 	u32 num_elt;
744aa182ae2SJonas Aaberg 
745aa182ae2SJonas Aaberg 	if (d40c->log_num != D40_PHY_CHAN)
746aa182ae2SJonas Aaberg 		num_elt = (readl(&d40c->lcpa->lcsp2) & D40_MEM_LCSP2_ECNT_MASK)
747aa182ae2SJonas Aaberg 			>> D40_MEM_LCSP2_ECNT_POS;
748aa182ae2SJonas Aaberg 	else
749aa182ae2SJonas Aaberg 		num_elt = (readl(d40c->base->virtbase + D40_DREG_PCBASE +
750aa182ae2SJonas Aaberg 				 d40c->phy_chan->num * D40_DREG_PCDELTA +
751aa182ae2SJonas Aaberg 				 D40_CHAN_REG_SDELT) &
752aa182ae2SJonas Aaberg 			   D40_SREG_ELEM_PHY_ECNT_MASK) >>
753aa182ae2SJonas Aaberg 			D40_SREG_ELEM_PHY_ECNT_POS;
754aa182ae2SJonas Aaberg 	return num_elt * (1 << d40c->dma_cfg.dst_info.data_width);
755aa182ae2SJonas Aaberg }
756aa182ae2SJonas Aaberg 
757aa182ae2SJonas Aaberg static bool d40_tx_is_linked(struct d40_chan *d40c)
758aa182ae2SJonas Aaberg {
759aa182ae2SJonas Aaberg 	bool is_link;
760aa182ae2SJonas Aaberg 
761aa182ae2SJonas Aaberg 	if (d40c->log_num != D40_PHY_CHAN)
762aa182ae2SJonas Aaberg 		is_link = readl(&d40c->lcpa->lcsp3) &  D40_MEM_LCSP3_DLOS_MASK;
763aa182ae2SJonas Aaberg 	else
764aa182ae2SJonas Aaberg 		is_link = readl(d40c->base->virtbase + D40_DREG_PCBASE +
765aa182ae2SJonas Aaberg 				d40c->phy_chan->num * D40_DREG_PCDELTA +
766aa182ae2SJonas Aaberg 				D40_CHAN_REG_SDLNK) &
767aa182ae2SJonas Aaberg 			D40_SREG_LNK_PHYS_LNK_MASK;
768aa182ae2SJonas Aaberg 	return is_link;
769aa182ae2SJonas Aaberg }
770aa182ae2SJonas Aaberg 
771aa182ae2SJonas Aaberg static int d40_pause(struct dma_chan *chan)
772aa182ae2SJonas Aaberg {
773aa182ae2SJonas Aaberg 	struct d40_chan *d40c =
774aa182ae2SJonas Aaberg 		container_of(chan, struct d40_chan, chan);
775aa182ae2SJonas Aaberg 	int res = 0;
776aa182ae2SJonas Aaberg 	unsigned long flags;
777aa182ae2SJonas Aaberg 
7783ac012afSJonas Aaberg 	if (!d40c->busy)
7793ac012afSJonas Aaberg 		return 0;
7803ac012afSJonas Aaberg 
781aa182ae2SJonas Aaberg 	spin_lock_irqsave(&d40c->lock, flags);
782aa182ae2SJonas Aaberg 
783aa182ae2SJonas Aaberg 	res = d40_channel_execute_command(d40c, D40_DMA_SUSPEND_REQ);
784aa182ae2SJonas Aaberg 	if (res == 0) {
785aa182ae2SJonas Aaberg 		if (d40c->log_num != D40_PHY_CHAN) {
786aa182ae2SJonas Aaberg 			d40_config_set_event(d40c, false);
787aa182ae2SJonas Aaberg 			/* Resume the other logical channels if any */
788aa182ae2SJonas Aaberg 			if (d40_chan_has_events(d40c))
789aa182ae2SJonas Aaberg 				res = d40_channel_execute_command(d40c,
790aa182ae2SJonas Aaberg 								  D40_DMA_RUN);
791aa182ae2SJonas Aaberg 		}
792aa182ae2SJonas Aaberg 	}
793aa182ae2SJonas Aaberg 
794aa182ae2SJonas Aaberg 	spin_unlock_irqrestore(&d40c->lock, flags);
795aa182ae2SJonas Aaberg 	return res;
796aa182ae2SJonas Aaberg }
797aa182ae2SJonas Aaberg 
798aa182ae2SJonas Aaberg static int d40_resume(struct dma_chan *chan)
799aa182ae2SJonas Aaberg {
800aa182ae2SJonas Aaberg 	struct d40_chan *d40c =
801aa182ae2SJonas Aaberg 		container_of(chan, struct d40_chan, chan);
802aa182ae2SJonas Aaberg 	int res = 0;
803aa182ae2SJonas Aaberg 	unsigned long flags;
804aa182ae2SJonas Aaberg 
8053ac012afSJonas Aaberg 	if (!d40c->busy)
8063ac012afSJonas Aaberg 		return 0;
8073ac012afSJonas Aaberg 
808aa182ae2SJonas Aaberg 	spin_lock_irqsave(&d40c->lock, flags);
809aa182ae2SJonas Aaberg 
810aa182ae2SJonas Aaberg 	if (d40c->base->rev == 0)
811aa182ae2SJonas Aaberg 		if (d40c->log_num != D40_PHY_CHAN) {
812aa182ae2SJonas Aaberg 			res = d40_channel_execute_command(d40c,
813aa182ae2SJonas Aaberg 							  D40_DMA_SUSPEND_REQ);
814aa182ae2SJonas Aaberg 			goto no_suspend;
815aa182ae2SJonas Aaberg 		}
816aa182ae2SJonas Aaberg 
817aa182ae2SJonas Aaberg 	/* If bytes left to transfer or linked tx resume job */
818aa182ae2SJonas Aaberg 	if (d40_residue(d40c) || d40_tx_is_linked(d40c)) {
819aa182ae2SJonas Aaberg 
820aa182ae2SJonas Aaberg 		if (d40c->log_num != D40_PHY_CHAN)
821aa182ae2SJonas Aaberg 			d40_config_set_event(d40c, true);
822aa182ae2SJonas Aaberg 
823aa182ae2SJonas Aaberg 		res = d40_channel_execute_command(d40c, D40_DMA_RUN);
824aa182ae2SJonas Aaberg 	}
825aa182ae2SJonas Aaberg 
826aa182ae2SJonas Aaberg no_suspend:
827aa182ae2SJonas Aaberg 	spin_unlock_irqrestore(&d40c->lock, flags);
828aa182ae2SJonas Aaberg 	return res;
829aa182ae2SJonas Aaberg }
830aa182ae2SJonas Aaberg 
831aa182ae2SJonas Aaberg static void d40_tx_submit_log(struct d40_chan *d40c, struct d40_desc *d40d)
832aa182ae2SJonas Aaberg {
833aa182ae2SJonas Aaberg 	/* TODO: Write */
834aa182ae2SJonas Aaberg }
835aa182ae2SJonas Aaberg 
836aa182ae2SJonas Aaberg static void d40_tx_submit_phy(struct d40_chan *d40c, struct d40_desc *d40d)
837aa182ae2SJonas Aaberg {
838aa182ae2SJonas Aaberg 	struct d40_desc *d40d_prev = NULL;
839aa182ae2SJonas Aaberg 	int i;
840aa182ae2SJonas Aaberg 	u32 val;
841aa182ae2SJonas Aaberg 
842aa182ae2SJonas Aaberg 	if (!list_empty(&d40c->queue))
843aa182ae2SJonas Aaberg 		d40d_prev = d40_last_queued(d40c);
844aa182ae2SJonas Aaberg 	else if (!list_empty(&d40c->active))
845aa182ae2SJonas Aaberg 		d40d_prev = d40_first_active_get(d40c);
846aa182ae2SJonas Aaberg 
847aa182ae2SJonas Aaberg 	if (!d40d_prev)
848aa182ae2SJonas Aaberg 		return;
849aa182ae2SJonas Aaberg 
850aa182ae2SJonas Aaberg 	/* Here we try to join this job with previous jobs */
851aa182ae2SJonas Aaberg 	val = readl(d40c->base->virtbase + D40_DREG_PCBASE +
852aa182ae2SJonas Aaberg 		    d40c->phy_chan->num * D40_DREG_PCDELTA +
853aa182ae2SJonas Aaberg 		    D40_CHAN_REG_SSLNK);
854aa182ae2SJonas Aaberg 
855aa182ae2SJonas Aaberg 	/* Figure out which link we're currently transmitting */
856aa182ae2SJonas Aaberg 	for (i = 0; i < d40d_prev->lli_len; i++)
857aa182ae2SJonas Aaberg 		if (val == d40d_prev->lli_phy.src[i].reg_lnk)
858aa182ae2SJonas Aaberg 			break;
859aa182ae2SJonas Aaberg 
860aa182ae2SJonas Aaberg 	val = readl(d40c->base->virtbase + D40_DREG_PCBASE +
861aa182ae2SJonas Aaberg 		    d40c->phy_chan->num * D40_DREG_PCDELTA +
862aa182ae2SJonas Aaberg 		    D40_CHAN_REG_SSELT) >> D40_SREG_ELEM_LOG_ECNT_POS;
863aa182ae2SJonas Aaberg 
864aa182ae2SJonas Aaberg 	if (i == (d40d_prev->lli_len - 1) && val > 0) {
865aa182ae2SJonas Aaberg 		/* Change the current one */
866aa182ae2SJonas Aaberg 		writel(virt_to_phys(d40d->lli_phy.src),
867aa182ae2SJonas Aaberg 		       d40c->base->virtbase + D40_DREG_PCBASE +
868aa182ae2SJonas Aaberg 		       d40c->phy_chan->num * D40_DREG_PCDELTA +
869aa182ae2SJonas Aaberg 		       D40_CHAN_REG_SSLNK);
870aa182ae2SJonas Aaberg 		writel(virt_to_phys(d40d->lli_phy.dst),
871aa182ae2SJonas Aaberg 		       d40c->base->virtbase + D40_DREG_PCBASE +
872aa182ae2SJonas Aaberg 		       d40c->phy_chan->num * D40_DREG_PCDELTA +
873aa182ae2SJonas Aaberg 		       D40_CHAN_REG_SDLNK);
874aa182ae2SJonas Aaberg 
875aa182ae2SJonas Aaberg 		d40d->is_hw_linked = true;
876aa182ae2SJonas Aaberg 
877aa182ae2SJonas Aaberg 	} else if (i < d40d_prev->lli_len) {
878aa182ae2SJonas Aaberg 		(void) dma_unmap_single(d40c->base->dev,
879aa182ae2SJonas Aaberg 					virt_to_phys(d40d_prev->lli_phy.src),
880aa182ae2SJonas Aaberg 					d40d_prev->lli_pool.size,
881aa182ae2SJonas Aaberg 					DMA_TO_DEVICE);
882aa182ae2SJonas Aaberg 
883aa182ae2SJonas Aaberg 		/* Keep the settings */
884aa182ae2SJonas Aaberg 		val = d40d_prev->lli_phy.src[d40d_prev->lli_len - 1].reg_lnk &
885aa182ae2SJonas Aaberg 			~D40_SREG_LNK_PHYS_LNK_MASK;
886aa182ae2SJonas Aaberg 		d40d_prev->lli_phy.src[d40d_prev->lli_len - 1].reg_lnk =
887aa182ae2SJonas Aaberg 			val | virt_to_phys(d40d->lli_phy.src);
888aa182ae2SJonas Aaberg 
889aa182ae2SJonas Aaberg 		val = d40d_prev->lli_phy.dst[d40d_prev->lli_len - 1].reg_lnk &
890aa182ae2SJonas Aaberg 			~D40_SREG_LNK_PHYS_LNK_MASK;
891aa182ae2SJonas Aaberg 		d40d_prev->lli_phy.dst[d40d_prev->lli_len - 1].reg_lnk =
892aa182ae2SJonas Aaberg 			val | virt_to_phys(d40d->lli_phy.dst);
893aa182ae2SJonas Aaberg 
894aa182ae2SJonas Aaberg 		(void) dma_map_single(d40c->base->dev,
895aa182ae2SJonas Aaberg 				      d40d_prev->lli_phy.src,
896aa182ae2SJonas Aaberg 				      d40d_prev->lli_pool.size,
897aa182ae2SJonas Aaberg 				      DMA_TO_DEVICE);
898aa182ae2SJonas Aaberg 		d40d->is_hw_linked = true;
899aa182ae2SJonas Aaberg 	}
900aa182ae2SJonas Aaberg }
901aa182ae2SJonas Aaberg 
9028d318a50SLinus Walleij static dma_cookie_t d40_tx_submit(struct dma_async_tx_descriptor *tx)
9038d318a50SLinus Walleij {
9048d318a50SLinus Walleij 	struct d40_chan *d40c = container_of(tx->chan,
9058d318a50SLinus Walleij 					     struct d40_chan,
9068d318a50SLinus Walleij 					     chan);
9078d318a50SLinus Walleij 	struct d40_desc *d40d = container_of(tx, struct d40_desc, txd);
9088d318a50SLinus Walleij 	unsigned long flags;
9098d318a50SLinus Walleij 
910aa182ae2SJonas Aaberg 	(void) d40_pause(&d40c->chan);
911aa182ae2SJonas Aaberg 
9128d318a50SLinus Walleij 	spin_lock_irqsave(&d40c->lock, flags);
9138d318a50SLinus Walleij 
914aa182ae2SJonas Aaberg 	d40c->chan.cookie++;
915aa182ae2SJonas Aaberg 
916aa182ae2SJonas Aaberg 	if (d40c->chan.cookie < 0)
917aa182ae2SJonas Aaberg 		d40c->chan.cookie = 1;
918aa182ae2SJonas Aaberg 
919aa182ae2SJonas Aaberg 	d40d->txd.cookie = d40c->chan.cookie;
920aa182ae2SJonas Aaberg 
921aa182ae2SJonas Aaberg 	if (d40c->log_num == D40_PHY_CHAN)
922aa182ae2SJonas Aaberg 		d40_tx_submit_phy(d40c, d40d);
923aa182ae2SJonas Aaberg 	else
924aa182ae2SJonas Aaberg 		d40_tx_submit_log(d40c, d40d);
9258d318a50SLinus Walleij 
9268d318a50SLinus Walleij 	d40_desc_queue(d40c, d40d);
9278d318a50SLinus Walleij 
9288d318a50SLinus Walleij 	spin_unlock_irqrestore(&d40c->lock, flags);
9298d318a50SLinus Walleij 
930aa182ae2SJonas Aaberg 	(void) d40_resume(&d40c->chan);
931aa182ae2SJonas Aaberg 
9328d318a50SLinus Walleij 	return tx->cookie;
9338d318a50SLinus Walleij }
9348d318a50SLinus Walleij 
9358d318a50SLinus Walleij static int d40_start(struct d40_chan *d40c)
9368d318a50SLinus Walleij {
937f4185592SLinus Walleij 	if (d40c->base->rev == 0) {
938f4185592SLinus Walleij 		int err;
939f4185592SLinus Walleij 
940f4185592SLinus Walleij 		if (d40c->log_num != D40_PHY_CHAN) {
941f4185592SLinus Walleij 			err = d40_channel_execute_command(d40c,
942f4185592SLinus Walleij 							  D40_DMA_SUSPEND_REQ);
943f4185592SLinus Walleij 			if (err)
944f4185592SLinus Walleij 				return err;
945f4185592SLinus Walleij 		}
946f4185592SLinus Walleij 	}
947f4185592SLinus Walleij 
9480c32269dSJonas Aaberg 	if (d40c->log_num != D40_PHY_CHAN)
9498d318a50SLinus Walleij 		d40_config_set_event(d40c, true);
9508d318a50SLinus Walleij 
9510c32269dSJonas Aaberg 	return d40_channel_execute_command(d40c, D40_DMA_RUN);
9528d318a50SLinus Walleij }
9538d318a50SLinus Walleij 
9548d318a50SLinus Walleij static struct d40_desc *d40_queue_start(struct d40_chan *d40c)
9558d318a50SLinus Walleij {
9568d318a50SLinus Walleij 	struct d40_desc *d40d;
9578d318a50SLinus Walleij 	int err;
9588d318a50SLinus Walleij 
9598d318a50SLinus Walleij 	/* Start queued jobs, if any */
9608d318a50SLinus Walleij 	d40d = d40_first_queued(d40c);
9618d318a50SLinus Walleij 
9628d318a50SLinus Walleij 	if (d40d != NULL) {
9638d318a50SLinus Walleij 		d40c->busy = true;
9648d318a50SLinus Walleij 
9658d318a50SLinus Walleij 		/* Remove from queue */
9668d318a50SLinus Walleij 		d40_desc_remove(d40d);
9678d318a50SLinus Walleij 
9688d318a50SLinus Walleij 		/* Add to active queue */
9698d318a50SLinus Walleij 		d40_desc_submit(d40c, d40d);
9708d318a50SLinus Walleij 
971aa182ae2SJonas Aaberg 		/*
972aa182ae2SJonas Aaberg 		 * If this job is already linked in hw,
973aa182ae2SJonas Aaberg 		 * do not submit it.
974aa182ae2SJonas Aaberg 		 */
975698e4732SJonas Aaberg 
976aa182ae2SJonas Aaberg 		if (!d40d->is_hw_linked) {
9778d318a50SLinus Walleij 			/* Initiate DMA job */
9788d318a50SLinus Walleij 			d40_desc_load(d40c, d40d);
9798d318a50SLinus Walleij 
9808d318a50SLinus Walleij 			/* Start dma job */
9818d318a50SLinus Walleij 			err = d40_start(d40c);
9828d318a50SLinus Walleij 
9838d318a50SLinus Walleij 			if (err)
9848d318a50SLinus Walleij 				return NULL;
9858d318a50SLinus Walleij 		}
986aa182ae2SJonas Aaberg 	}
9878d318a50SLinus Walleij 
9888d318a50SLinus Walleij 	return d40d;
9898d318a50SLinus Walleij }
9908d318a50SLinus Walleij 
9918d318a50SLinus Walleij /* called from interrupt context */
9928d318a50SLinus Walleij static void dma_tc_handle(struct d40_chan *d40c)
9938d318a50SLinus Walleij {
9948d318a50SLinus Walleij 	struct d40_desc *d40d;
9958d318a50SLinus Walleij 
9968d318a50SLinus Walleij 	/* Get first active entry from list */
9978d318a50SLinus Walleij 	d40d = d40_first_active_get(d40c);
9988d318a50SLinus Walleij 
9998d318a50SLinus Walleij 	if (d40d == NULL)
10008d318a50SLinus Walleij 		return;
10018d318a50SLinus Walleij 
1002698e4732SJonas Aaberg 	d40_lcla_free_all(d40c, d40d);
10038d318a50SLinus Walleij 
1004698e4732SJonas Aaberg 	if (d40d->lli_current < d40d->lli_len) {
10058d318a50SLinus Walleij 		d40_desc_load(d40c, d40d);
10068d318a50SLinus Walleij 		/* Start dma job */
10078d318a50SLinus Walleij 		(void) d40_start(d40c);
10088d318a50SLinus Walleij 		return;
10098d318a50SLinus Walleij 	}
10108d318a50SLinus Walleij 
10118d318a50SLinus Walleij 	if (d40_queue_start(d40c) == NULL)
10128d318a50SLinus Walleij 		d40c->busy = false;
10138d318a50SLinus Walleij 
10148d318a50SLinus Walleij 	d40c->pending_tx++;
10158d318a50SLinus Walleij 	tasklet_schedule(&d40c->tasklet);
10168d318a50SLinus Walleij 
10178d318a50SLinus Walleij }
10188d318a50SLinus Walleij 
10198d318a50SLinus Walleij static void dma_tasklet(unsigned long data)
10208d318a50SLinus Walleij {
10218d318a50SLinus Walleij 	struct d40_chan *d40c = (struct d40_chan *) data;
1022767a9675SJonas Aaberg 	struct d40_desc *d40d;
10238d318a50SLinus Walleij 	unsigned long flags;
10248d318a50SLinus Walleij 	dma_async_tx_callback callback;
10258d318a50SLinus Walleij 	void *callback_param;
10268d318a50SLinus Walleij 
10278d318a50SLinus Walleij 	spin_lock_irqsave(&d40c->lock, flags);
10288d318a50SLinus Walleij 
10298d318a50SLinus Walleij 	/* Get first active entry from list */
1030767a9675SJonas Aaberg 	d40d = d40_first_active_get(d40c);
10318d318a50SLinus Walleij 
1032767a9675SJonas Aaberg 	if (d40d == NULL)
10338d318a50SLinus Walleij 		goto err;
10348d318a50SLinus Walleij 
1035767a9675SJonas Aaberg 	d40c->completed = d40d->txd.cookie;
10368d318a50SLinus Walleij 
10378d318a50SLinus Walleij 	/*
10388d318a50SLinus Walleij 	 * If terminating a channel pending_tx is set to zero.
10398d318a50SLinus Walleij 	 * This prevents any finished active jobs to return to the client.
10408d318a50SLinus Walleij 	 */
10418d318a50SLinus Walleij 	if (d40c->pending_tx == 0) {
10428d318a50SLinus Walleij 		spin_unlock_irqrestore(&d40c->lock, flags);
10438d318a50SLinus Walleij 		return;
10448d318a50SLinus Walleij 	}
10458d318a50SLinus Walleij 
10468d318a50SLinus Walleij 	/* Callback to client */
1047767a9675SJonas Aaberg 	callback = d40d->txd.callback;
1048767a9675SJonas Aaberg 	callback_param = d40d->txd.callback_param;
10498d318a50SLinus Walleij 
1050767a9675SJonas Aaberg 	if (async_tx_test_ack(&d40d->txd)) {
1051767a9675SJonas Aaberg 		d40_pool_lli_free(d40d);
1052767a9675SJonas Aaberg 		d40_desc_remove(d40d);
1053767a9675SJonas Aaberg 		d40_desc_free(d40c, d40d);
10548d318a50SLinus Walleij 	} else {
1055767a9675SJonas Aaberg 		if (!d40d->is_in_client_list) {
1056767a9675SJonas Aaberg 			d40_desc_remove(d40d);
1057698e4732SJonas Aaberg 			d40_lcla_free_all(d40c, d40d);
1058767a9675SJonas Aaberg 			list_add_tail(&d40d->node, &d40c->client);
1059767a9675SJonas Aaberg 			d40d->is_in_client_list = true;
10608d318a50SLinus Walleij 		}
10618d318a50SLinus Walleij 	}
10628d318a50SLinus Walleij 
10638d318a50SLinus Walleij 	d40c->pending_tx--;
10648d318a50SLinus Walleij 
10658d318a50SLinus Walleij 	if (d40c->pending_tx)
10668d318a50SLinus Walleij 		tasklet_schedule(&d40c->tasklet);
10678d318a50SLinus Walleij 
10688d318a50SLinus Walleij 	spin_unlock_irqrestore(&d40c->lock, flags);
10698d318a50SLinus Walleij 
1070767a9675SJonas Aaberg 	if (callback && (d40d->txd.flags & DMA_PREP_INTERRUPT))
10718d318a50SLinus Walleij 		callback(callback_param);
10728d318a50SLinus Walleij 
10738d318a50SLinus Walleij 	return;
10748d318a50SLinus Walleij 
10758d318a50SLinus Walleij  err:
10768d318a50SLinus Walleij 	/* Rescue manouver if receiving double interrupts */
10778d318a50SLinus Walleij 	if (d40c->pending_tx > 0)
10788d318a50SLinus Walleij 		d40c->pending_tx--;
10798d318a50SLinus Walleij 	spin_unlock_irqrestore(&d40c->lock, flags);
10808d318a50SLinus Walleij }
10818d318a50SLinus Walleij 
10828d318a50SLinus Walleij static irqreturn_t d40_handle_interrupt(int irq, void *data)
10838d318a50SLinus Walleij {
10848d318a50SLinus Walleij 	static const struct d40_interrupt_lookup il[] = {
10858d318a50SLinus Walleij 		{D40_DREG_LCTIS0, D40_DREG_LCICR0, false,  0},
10868d318a50SLinus Walleij 		{D40_DREG_LCTIS1, D40_DREG_LCICR1, false, 32},
10878d318a50SLinus Walleij 		{D40_DREG_LCTIS2, D40_DREG_LCICR2, false, 64},
10888d318a50SLinus Walleij 		{D40_DREG_LCTIS3, D40_DREG_LCICR3, false, 96},
10898d318a50SLinus Walleij 		{D40_DREG_LCEIS0, D40_DREG_LCICR0, true,   0},
10908d318a50SLinus Walleij 		{D40_DREG_LCEIS1, D40_DREG_LCICR1, true,  32},
10918d318a50SLinus Walleij 		{D40_DREG_LCEIS2, D40_DREG_LCICR2, true,  64},
10928d318a50SLinus Walleij 		{D40_DREG_LCEIS3, D40_DREG_LCICR3, true,  96},
10938d318a50SLinus Walleij 		{D40_DREG_PCTIS,  D40_DREG_PCICR,  false, D40_PHY_CHAN},
10948d318a50SLinus Walleij 		{D40_DREG_PCEIS,  D40_DREG_PCICR,  true,  D40_PHY_CHAN},
10958d318a50SLinus Walleij 	};
10968d318a50SLinus Walleij 
10978d318a50SLinus Walleij 	int i;
10988d318a50SLinus Walleij 	u32 regs[ARRAY_SIZE(il)];
10998d318a50SLinus Walleij 	u32 idx;
11008d318a50SLinus Walleij 	u32 row;
11018d318a50SLinus Walleij 	long chan = -1;
11028d318a50SLinus Walleij 	struct d40_chan *d40c;
11038d318a50SLinus Walleij 	unsigned long flags;
11048d318a50SLinus Walleij 	struct d40_base *base = data;
11058d318a50SLinus Walleij 
11068d318a50SLinus Walleij 	spin_lock_irqsave(&base->interrupt_lock, flags);
11078d318a50SLinus Walleij 
11088d318a50SLinus Walleij 	/* Read interrupt status of both logical and physical channels */
11098d318a50SLinus Walleij 	for (i = 0; i < ARRAY_SIZE(il); i++)
11108d318a50SLinus Walleij 		regs[i] = readl(base->virtbase + il[i].src);
11118d318a50SLinus Walleij 
11128d318a50SLinus Walleij 	for (;;) {
11138d318a50SLinus Walleij 
11148d318a50SLinus Walleij 		chan = find_next_bit((unsigned long *)regs,
11158d318a50SLinus Walleij 				     BITS_PER_LONG * ARRAY_SIZE(il), chan + 1);
11168d318a50SLinus Walleij 
11178d318a50SLinus Walleij 		/* No more set bits found? */
11188d318a50SLinus Walleij 		if (chan == BITS_PER_LONG * ARRAY_SIZE(il))
11198d318a50SLinus Walleij 			break;
11208d318a50SLinus Walleij 
11218d318a50SLinus Walleij 		row = chan / BITS_PER_LONG;
11228d318a50SLinus Walleij 		idx = chan & (BITS_PER_LONG - 1);
11238d318a50SLinus Walleij 
11248d318a50SLinus Walleij 		/* ACK interrupt */
11251b00348dSJonas Aaberg 		writel(1 << idx, base->virtbase + il[row].clr);
11268d318a50SLinus Walleij 
11278d318a50SLinus Walleij 		if (il[row].offset == D40_PHY_CHAN)
11288d318a50SLinus Walleij 			d40c = base->lookup_phy_chans[idx];
11298d318a50SLinus Walleij 		else
11308d318a50SLinus Walleij 			d40c = base->lookup_log_chans[il[row].offset + idx];
11318d318a50SLinus Walleij 		spin_lock(&d40c->lock);
11328d318a50SLinus Walleij 
11338d318a50SLinus Walleij 		if (!il[row].is_error)
11348d318a50SLinus Walleij 			dma_tc_handle(d40c);
11358d318a50SLinus Walleij 		else
1136508849adSLinus Walleij 			dev_err(base->dev,
1137508849adSLinus Walleij 				"[%s] IRQ chan: %ld offset %d idx %d\n",
11388d318a50SLinus Walleij 				__func__, chan, il[row].offset, idx);
11398d318a50SLinus Walleij 
11408d318a50SLinus Walleij 		spin_unlock(&d40c->lock);
11418d318a50SLinus Walleij 	}
11428d318a50SLinus Walleij 
11438d318a50SLinus Walleij 	spin_unlock_irqrestore(&base->interrupt_lock, flags);
11448d318a50SLinus Walleij 
11458d318a50SLinus Walleij 	return IRQ_HANDLED;
11468d318a50SLinus Walleij }
11478d318a50SLinus Walleij 
11488d318a50SLinus Walleij static int d40_validate_conf(struct d40_chan *d40c,
11498d318a50SLinus Walleij 			     struct stedma40_chan_cfg *conf)
11508d318a50SLinus Walleij {
11518d318a50SLinus Walleij 	int res = 0;
11528d318a50SLinus Walleij 	u32 dst_event_group = D40_TYPE_TO_GROUP(conf->dst_dev_type);
11538d318a50SLinus Walleij 	u32 src_event_group = D40_TYPE_TO_GROUP(conf->src_dev_type);
1154*38bdbf02SRabin Vincent 	bool is_log = conf->mode == STEDMA40_MODE_LOGICAL;
11558d318a50SLinus Walleij 
11560747c7baSLinus Walleij 	if (!conf->dir) {
11570747c7baSLinus Walleij 		dev_err(&d40c->chan.dev->device, "[%s] Invalid direction.\n",
11580747c7baSLinus Walleij 			__func__);
11590747c7baSLinus Walleij 		res = -EINVAL;
11600747c7baSLinus Walleij 	}
11610747c7baSLinus Walleij 
11620747c7baSLinus Walleij 	if (conf->dst_dev_type != STEDMA40_DEV_DST_MEMORY &&
11630747c7baSLinus Walleij 	    d40c->base->plat_data->dev_tx[conf->dst_dev_type] == 0 &&
11640747c7baSLinus Walleij 	    d40c->runtime_addr == 0) {
11650747c7baSLinus Walleij 
11660747c7baSLinus Walleij 		dev_err(&d40c->chan.dev->device,
11670747c7baSLinus Walleij 			"[%s] Invalid TX channel address (%d)\n",
11680747c7baSLinus Walleij 			__func__, conf->dst_dev_type);
11690747c7baSLinus Walleij 		res = -EINVAL;
11700747c7baSLinus Walleij 	}
11710747c7baSLinus Walleij 
11720747c7baSLinus Walleij 	if (conf->src_dev_type != STEDMA40_DEV_SRC_MEMORY &&
11730747c7baSLinus Walleij 	    d40c->base->plat_data->dev_rx[conf->src_dev_type] == 0 &&
11740747c7baSLinus Walleij 	    d40c->runtime_addr == 0) {
11750747c7baSLinus Walleij 		dev_err(&d40c->chan.dev->device,
11760747c7baSLinus Walleij 			"[%s] Invalid RX channel address (%d)\n",
11770747c7baSLinus Walleij 			__func__, conf->src_dev_type);
11780747c7baSLinus Walleij 		res = -EINVAL;
11790747c7baSLinus Walleij 	}
11800747c7baSLinus Walleij 
11810747c7baSLinus Walleij 	if (conf->dir == STEDMA40_MEM_TO_PERIPH &&
11828d318a50SLinus Walleij 	    dst_event_group == STEDMA40_DEV_DST_MEMORY) {
11838d318a50SLinus Walleij 		dev_err(&d40c->chan.dev->device, "[%s] Invalid dst\n",
11848d318a50SLinus Walleij 			__func__);
11858d318a50SLinus Walleij 		res = -EINVAL;
11868d318a50SLinus Walleij 	}
11878d318a50SLinus Walleij 
11880747c7baSLinus Walleij 	if (conf->dir == STEDMA40_PERIPH_TO_MEM &&
11898d318a50SLinus Walleij 	    src_event_group == STEDMA40_DEV_SRC_MEMORY) {
11908d318a50SLinus Walleij 		dev_err(&d40c->chan.dev->device, "[%s] Invalid src\n",
11918d318a50SLinus Walleij 			__func__);
11928d318a50SLinus Walleij 		res = -EINVAL;
11938d318a50SLinus Walleij 	}
11948d318a50SLinus Walleij 
11958d318a50SLinus Walleij 	if (src_event_group == STEDMA40_DEV_SRC_MEMORY &&
11968d318a50SLinus Walleij 	    dst_event_group == STEDMA40_DEV_DST_MEMORY && is_log) {
11978d318a50SLinus Walleij 		dev_err(&d40c->chan.dev->device,
11988d318a50SLinus Walleij 			"[%s] No event line\n", __func__);
11998d318a50SLinus Walleij 		res = -EINVAL;
12008d318a50SLinus Walleij 	}
12018d318a50SLinus Walleij 
12028d318a50SLinus Walleij 	if (conf->dir == STEDMA40_PERIPH_TO_PERIPH &&
12038d318a50SLinus Walleij 	    (src_event_group != dst_event_group)) {
12048d318a50SLinus Walleij 		dev_err(&d40c->chan.dev->device,
12058d318a50SLinus Walleij 			"[%s] Invalid event group\n", __func__);
12068d318a50SLinus Walleij 		res = -EINVAL;
12078d318a50SLinus Walleij 	}
12088d318a50SLinus Walleij 
12098d318a50SLinus Walleij 	if (conf->dir == STEDMA40_PERIPH_TO_PERIPH) {
12108d318a50SLinus Walleij 		/*
12118d318a50SLinus Walleij 		 * DMAC HW supports it. Will be added to this driver,
12128d318a50SLinus Walleij 		 * in case any dma client requires it.
12138d318a50SLinus Walleij 		 */
12148d318a50SLinus Walleij 		dev_err(&d40c->chan.dev->device,
12158d318a50SLinus Walleij 			"[%s] periph to periph not supported\n",
12168d318a50SLinus Walleij 			__func__);
12178d318a50SLinus Walleij 		res = -EINVAL;
12188d318a50SLinus Walleij 	}
12198d318a50SLinus Walleij 
12208d318a50SLinus Walleij 	return res;
12218d318a50SLinus Walleij }
12228d318a50SLinus Walleij 
12238d318a50SLinus Walleij static bool d40_alloc_mask_set(struct d40_phy_res *phy, bool is_src,
12244aed79b2SMarcin Mielczarczyk 			       int log_event_line, bool is_log)
12258d318a50SLinus Walleij {
12268d318a50SLinus Walleij 	unsigned long flags;
12278d318a50SLinus Walleij 	spin_lock_irqsave(&phy->lock, flags);
12284aed79b2SMarcin Mielczarczyk 	if (!is_log) {
12298d318a50SLinus Walleij 		/* Physical interrupts are masked per physical full channel */
12308d318a50SLinus Walleij 		if (phy->allocated_src == D40_ALLOC_FREE &&
12318d318a50SLinus Walleij 		    phy->allocated_dst == D40_ALLOC_FREE) {
12328d318a50SLinus Walleij 			phy->allocated_dst = D40_ALLOC_PHY;
12338d318a50SLinus Walleij 			phy->allocated_src = D40_ALLOC_PHY;
12348d318a50SLinus Walleij 			goto found;
12358d318a50SLinus Walleij 		} else
12368d318a50SLinus Walleij 			goto not_found;
12378d318a50SLinus Walleij 	}
12388d318a50SLinus Walleij 
12398d318a50SLinus Walleij 	/* Logical channel */
12408d318a50SLinus Walleij 	if (is_src) {
12418d318a50SLinus Walleij 		if (phy->allocated_src == D40_ALLOC_PHY)
12428d318a50SLinus Walleij 			goto not_found;
12438d318a50SLinus Walleij 
12448d318a50SLinus Walleij 		if (phy->allocated_src == D40_ALLOC_FREE)
12458d318a50SLinus Walleij 			phy->allocated_src = D40_ALLOC_LOG_FREE;
12468d318a50SLinus Walleij 
12478d318a50SLinus Walleij 		if (!(phy->allocated_src & (1 << log_event_line))) {
12488d318a50SLinus Walleij 			phy->allocated_src |= 1 << log_event_line;
12498d318a50SLinus Walleij 			goto found;
12508d318a50SLinus Walleij 		} else
12518d318a50SLinus Walleij 			goto not_found;
12528d318a50SLinus Walleij 	} else {
12538d318a50SLinus Walleij 		if (phy->allocated_dst == D40_ALLOC_PHY)
12548d318a50SLinus Walleij 			goto not_found;
12558d318a50SLinus Walleij 
12568d318a50SLinus Walleij 		if (phy->allocated_dst == D40_ALLOC_FREE)
12578d318a50SLinus Walleij 			phy->allocated_dst = D40_ALLOC_LOG_FREE;
12588d318a50SLinus Walleij 
12598d318a50SLinus Walleij 		if (!(phy->allocated_dst & (1 << log_event_line))) {
12608d318a50SLinus Walleij 			phy->allocated_dst |= 1 << log_event_line;
12618d318a50SLinus Walleij 			goto found;
12628d318a50SLinus Walleij 		} else
12638d318a50SLinus Walleij 			goto not_found;
12648d318a50SLinus Walleij 	}
12658d318a50SLinus Walleij 
12668d318a50SLinus Walleij not_found:
12678d318a50SLinus Walleij 	spin_unlock_irqrestore(&phy->lock, flags);
12688d318a50SLinus Walleij 	return false;
12698d318a50SLinus Walleij found:
12708d318a50SLinus Walleij 	spin_unlock_irqrestore(&phy->lock, flags);
12718d318a50SLinus Walleij 	return true;
12728d318a50SLinus Walleij }
12738d318a50SLinus Walleij 
12748d318a50SLinus Walleij static bool d40_alloc_mask_free(struct d40_phy_res *phy, bool is_src,
12758d318a50SLinus Walleij 			       int log_event_line)
12768d318a50SLinus Walleij {
12778d318a50SLinus Walleij 	unsigned long flags;
12788d318a50SLinus Walleij 	bool is_free = false;
12798d318a50SLinus Walleij 
12808d318a50SLinus Walleij 	spin_lock_irqsave(&phy->lock, flags);
12818d318a50SLinus Walleij 	if (!log_event_line) {
12828d318a50SLinus Walleij 		phy->allocated_dst = D40_ALLOC_FREE;
12838d318a50SLinus Walleij 		phy->allocated_src = D40_ALLOC_FREE;
12848d318a50SLinus Walleij 		is_free = true;
12858d318a50SLinus Walleij 		goto out;
12868d318a50SLinus Walleij 	}
12878d318a50SLinus Walleij 
12888d318a50SLinus Walleij 	/* Logical channel */
12898d318a50SLinus Walleij 	if (is_src) {
12908d318a50SLinus Walleij 		phy->allocated_src &= ~(1 << log_event_line);
12918d318a50SLinus Walleij 		if (phy->allocated_src == D40_ALLOC_LOG_FREE)
12928d318a50SLinus Walleij 			phy->allocated_src = D40_ALLOC_FREE;
12938d318a50SLinus Walleij 	} else {
12948d318a50SLinus Walleij 		phy->allocated_dst &= ~(1 << log_event_line);
12958d318a50SLinus Walleij 		if (phy->allocated_dst == D40_ALLOC_LOG_FREE)
12968d318a50SLinus Walleij 			phy->allocated_dst = D40_ALLOC_FREE;
12978d318a50SLinus Walleij 	}
12988d318a50SLinus Walleij 
12998d318a50SLinus Walleij 	is_free = ((phy->allocated_src | phy->allocated_dst) ==
13008d318a50SLinus Walleij 		   D40_ALLOC_FREE);
13018d318a50SLinus Walleij 
13028d318a50SLinus Walleij out:
13038d318a50SLinus Walleij 	spin_unlock_irqrestore(&phy->lock, flags);
13048d318a50SLinus Walleij 
13058d318a50SLinus Walleij 	return is_free;
13068d318a50SLinus Walleij }
13078d318a50SLinus Walleij 
13088d318a50SLinus Walleij static int d40_allocate_channel(struct d40_chan *d40c)
13098d318a50SLinus Walleij {
13108d318a50SLinus Walleij 	int dev_type;
13118d318a50SLinus Walleij 	int event_group;
13128d318a50SLinus Walleij 	int event_line;
13138d318a50SLinus Walleij 	struct d40_phy_res *phys;
13148d318a50SLinus Walleij 	int i;
13158d318a50SLinus Walleij 	int j;
13168d318a50SLinus Walleij 	int log_num;
13178d318a50SLinus Walleij 	bool is_src;
1318*38bdbf02SRabin Vincent 	bool is_log = d40c->dma_cfg.mode == STEDMA40_MODE_LOGICAL;
13198d318a50SLinus Walleij 
13208d318a50SLinus Walleij 	phys = d40c->base->phy_res;
13218d318a50SLinus Walleij 
13228d318a50SLinus Walleij 	if (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_MEM) {
13238d318a50SLinus Walleij 		dev_type = d40c->dma_cfg.src_dev_type;
13248d318a50SLinus Walleij 		log_num = 2 * dev_type;
13258d318a50SLinus Walleij 		is_src = true;
13268d318a50SLinus Walleij 	} else if (d40c->dma_cfg.dir == STEDMA40_MEM_TO_PERIPH ||
13278d318a50SLinus Walleij 		   d40c->dma_cfg.dir == STEDMA40_MEM_TO_MEM) {
13288d318a50SLinus Walleij 		/* dst event lines are used for logical memcpy */
13298d318a50SLinus Walleij 		dev_type = d40c->dma_cfg.dst_dev_type;
13308d318a50SLinus Walleij 		log_num = 2 * dev_type + 1;
13318d318a50SLinus Walleij 		is_src = false;
13328d318a50SLinus Walleij 	} else
13338d318a50SLinus Walleij 		return -EINVAL;
13348d318a50SLinus Walleij 
13358d318a50SLinus Walleij 	event_group = D40_TYPE_TO_GROUP(dev_type);
13368d318a50SLinus Walleij 	event_line = D40_TYPE_TO_EVENT(dev_type);
13378d318a50SLinus Walleij 
13388d318a50SLinus Walleij 	if (!is_log) {
13398d318a50SLinus Walleij 		if (d40c->dma_cfg.dir == STEDMA40_MEM_TO_MEM) {
13408d318a50SLinus Walleij 			/* Find physical half channel */
13418d318a50SLinus Walleij 			for (i = 0; i < d40c->base->num_phy_chans; i++) {
13428d318a50SLinus Walleij 
13434aed79b2SMarcin Mielczarczyk 				if (d40_alloc_mask_set(&phys[i], is_src,
13444aed79b2SMarcin Mielczarczyk 						       0, is_log))
13458d318a50SLinus Walleij 					goto found_phy;
13468d318a50SLinus Walleij 			}
13478d318a50SLinus Walleij 		} else
13488d318a50SLinus Walleij 			for (j = 0; j < d40c->base->num_phy_chans; j += 8) {
13498d318a50SLinus Walleij 				int phy_num = j  + event_group * 2;
13508d318a50SLinus Walleij 				for (i = phy_num; i < phy_num + 2; i++) {
1351508849adSLinus Walleij 					if (d40_alloc_mask_set(&phys[i],
1352508849adSLinus Walleij 							       is_src,
1353508849adSLinus Walleij 							       0,
1354508849adSLinus Walleij 							       is_log))
13558d318a50SLinus Walleij 						goto found_phy;
13568d318a50SLinus Walleij 				}
13578d318a50SLinus Walleij 			}
13588d318a50SLinus Walleij 		return -EINVAL;
13598d318a50SLinus Walleij found_phy:
13608d318a50SLinus Walleij 		d40c->phy_chan = &phys[i];
13618d318a50SLinus Walleij 		d40c->log_num = D40_PHY_CHAN;
13628d318a50SLinus Walleij 		goto out;
13638d318a50SLinus Walleij 	}
13648d318a50SLinus Walleij 	if (dev_type == -1)
13658d318a50SLinus Walleij 		return -EINVAL;
13668d318a50SLinus Walleij 
13678d318a50SLinus Walleij 	/* Find logical channel */
13688d318a50SLinus Walleij 	for (j = 0; j < d40c->base->num_phy_chans; j += 8) {
13698d318a50SLinus Walleij 		int phy_num = j + event_group * 2;
13708d318a50SLinus Walleij 		/*
13718d318a50SLinus Walleij 		 * Spread logical channels across all available physical rather
13728d318a50SLinus Walleij 		 * than pack every logical channel at the first available phy
13738d318a50SLinus Walleij 		 * channels.
13748d318a50SLinus Walleij 		 */
13758d318a50SLinus Walleij 		if (is_src) {
13768d318a50SLinus Walleij 			for (i = phy_num; i < phy_num + 2; i++) {
13778d318a50SLinus Walleij 				if (d40_alloc_mask_set(&phys[i], is_src,
13784aed79b2SMarcin Mielczarczyk 						       event_line, is_log))
13798d318a50SLinus Walleij 					goto found_log;
13808d318a50SLinus Walleij 			}
13818d318a50SLinus Walleij 		} else {
13828d318a50SLinus Walleij 			for (i = phy_num + 1; i >= phy_num; i--) {
13838d318a50SLinus Walleij 				if (d40_alloc_mask_set(&phys[i], is_src,
13844aed79b2SMarcin Mielczarczyk 						       event_line, is_log))
13858d318a50SLinus Walleij 					goto found_log;
13868d318a50SLinus Walleij 			}
13878d318a50SLinus Walleij 		}
13888d318a50SLinus Walleij 	}
13898d318a50SLinus Walleij 	return -EINVAL;
13908d318a50SLinus Walleij 
13918d318a50SLinus Walleij found_log:
13928d318a50SLinus Walleij 	d40c->phy_chan = &phys[i];
13938d318a50SLinus Walleij 	d40c->log_num = log_num;
13948d318a50SLinus Walleij out:
13958d318a50SLinus Walleij 
13968d318a50SLinus Walleij 	if (is_log)
13978d318a50SLinus Walleij 		d40c->base->lookup_log_chans[d40c->log_num] = d40c;
13988d318a50SLinus Walleij 	else
13998d318a50SLinus Walleij 		d40c->base->lookup_phy_chans[d40c->phy_chan->num] = d40c;
14008d318a50SLinus Walleij 
14018d318a50SLinus Walleij 	return 0;
14028d318a50SLinus Walleij 
14038d318a50SLinus Walleij }
14048d318a50SLinus Walleij 
14058d318a50SLinus Walleij static int d40_config_memcpy(struct d40_chan *d40c)
14068d318a50SLinus Walleij {
14078d318a50SLinus Walleij 	dma_cap_mask_t cap = d40c->chan.device->cap_mask;
14088d318a50SLinus Walleij 
14098d318a50SLinus Walleij 	if (dma_has_cap(DMA_MEMCPY, cap) && !dma_has_cap(DMA_SLAVE, cap)) {
14108d318a50SLinus Walleij 		d40c->dma_cfg = *d40c->base->plat_data->memcpy_conf_log;
14118d318a50SLinus Walleij 		d40c->dma_cfg.src_dev_type = STEDMA40_DEV_SRC_MEMORY;
14128d318a50SLinus Walleij 		d40c->dma_cfg.dst_dev_type = d40c->base->plat_data->
14138d318a50SLinus Walleij 			memcpy[d40c->chan.chan_id];
14148d318a50SLinus Walleij 
14158d318a50SLinus Walleij 	} else if (dma_has_cap(DMA_MEMCPY, cap) &&
14168d318a50SLinus Walleij 		   dma_has_cap(DMA_SLAVE, cap)) {
14178d318a50SLinus Walleij 		d40c->dma_cfg = *d40c->base->plat_data->memcpy_conf_phy;
14188d318a50SLinus Walleij 	} else {
14198d318a50SLinus Walleij 		dev_err(&d40c->chan.dev->device, "[%s] No memcpy\n",
14208d318a50SLinus Walleij 			__func__);
14218d318a50SLinus Walleij 		return -EINVAL;
14228d318a50SLinus Walleij 	}
14238d318a50SLinus Walleij 
14248d318a50SLinus Walleij 	return 0;
14258d318a50SLinus Walleij }
14268d318a50SLinus Walleij 
14278d318a50SLinus Walleij 
14288d318a50SLinus Walleij static int d40_free_dma(struct d40_chan *d40c)
14298d318a50SLinus Walleij {
14308d318a50SLinus Walleij 
14318d318a50SLinus Walleij 	int res = 0;
1432d181b3a8SJonas Aaberg 	u32 event;
14338d318a50SLinus Walleij 	struct d40_phy_res *phy = d40c->phy_chan;
14348d318a50SLinus Walleij 	bool is_src;
1435a8be8627SPer Friden 	struct d40_desc *d;
1436a8be8627SPer Friden 	struct d40_desc *_d;
1437a8be8627SPer Friden 
14388d318a50SLinus Walleij 
14398d318a50SLinus Walleij 	/* Terminate all queued and active transfers */
14408d318a50SLinus Walleij 	d40_term_all(d40c);
14418d318a50SLinus Walleij 
1442a8be8627SPer Friden 	/* Release client owned descriptors */
1443a8be8627SPer Friden 	if (!list_empty(&d40c->client))
1444a8be8627SPer Friden 		list_for_each_entry_safe(d, _d, &d40c->client, node) {
1445a8be8627SPer Friden 			d40_pool_lli_free(d);
1446a8be8627SPer Friden 			d40_desc_remove(d);
1447a8be8627SPer Friden 			d40_desc_free(d40c, d);
1448a8be8627SPer Friden 		}
1449a8be8627SPer Friden 
14508d318a50SLinus Walleij 	if (phy == NULL) {
14518d318a50SLinus Walleij 		dev_err(&d40c->chan.dev->device, "[%s] phy == null\n",
14528d318a50SLinus Walleij 			__func__);
14538d318a50SLinus Walleij 		return -EINVAL;
14548d318a50SLinus Walleij 	}
14558d318a50SLinus Walleij 
14568d318a50SLinus Walleij 	if (phy->allocated_src == D40_ALLOC_FREE &&
14578d318a50SLinus Walleij 	    phy->allocated_dst == D40_ALLOC_FREE) {
14588d318a50SLinus Walleij 		dev_err(&d40c->chan.dev->device, "[%s] channel already free\n",
14598d318a50SLinus Walleij 			__func__);
14608d318a50SLinus Walleij 		return -EINVAL;
14618d318a50SLinus Walleij 	}
14628d318a50SLinus Walleij 
14638d318a50SLinus Walleij 	if (d40c->dma_cfg.dir == STEDMA40_MEM_TO_PERIPH ||
14648d318a50SLinus Walleij 	    d40c->dma_cfg.dir == STEDMA40_MEM_TO_MEM) {
14658d318a50SLinus Walleij 		event = D40_TYPE_TO_EVENT(d40c->dma_cfg.dst_dev_type);
14668d318a50SLinus Walleij 		is_src = false;
14678d318a50SLinus Walleij 	} else if (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_MEM) {
14688d318a50SLinus Walleij 		event = D40_TYPE_TO_EVENT(d40c->dma_cfg.src_dev_type);
14698d318a50SLinus Walleij 		is_src = true;
14708d318a50SLinus Walleij 	} else {
14718d318a50SLinus Walleij 		dev_err(&d40c->chan.dev->device,
14728d318a50SLinus Walleij 			"[%s] Unknown direction\n", __func__);
14738d318a50SLinus Walleij 		return -EINVAL;
14748d318a50SLinus Walleij 	}
14758d318a50SLinus Walleij 
1476d181b3a8SJonas Aaberg 	res = d40_channel_execute_command(d40c, D40_DMA_SUSPEND_REQ);
1477d181b3a8SJonas Aaberg 	if (res) {
1478d181b3a8SJonas Aaberg 		dev_err(&d40c->chan.dev->device, "[%s] suspend failed\n",
1479d181b3a8SJonas Aaberg 			__func__);
1480d181b3a8SJonas Aaberg 		return res;
1481d181b3a8SJonas Aaberg 	}
14828d318a50SLinus Walleij 
1483d181b3a8SJonas Aaberg 	if (d40c->log_num != D40_PHY_CHAN) {
1484d181b3a8SJonas Aaberg 		/* Release logical channel, deactivate the event line */
1485d181b3a8SJonas Aaberg 
1486d181b3a8SJonas Aaberg 		d40_config_set_event(d40c, false);
14878d318a50SLinus Walleij 		d40c->base->lookup_log_chans[d40c->log_num] = NULL;
14888d318a50SLinus Walleij 
14898d318a50SLinus Walleij 		/*
14908d318a50SLinus Walleij 		 * Check if there are more logical allocation
14918d318a50SLinus Walleij 		 * on this phy channel.
14928d318a50SLinus Walleij 		 */
14938d318a50SLinus Walleij 		if (!d40_alloc_mask_free(phy, is_src, event)) {
14948d318a50SLinus Walleij 			/* Resume the other logical channels if any */
14958d318a50SLinus Walleij 			if (d40_chan_has_events(d40c)) {
14968d318a50SLinus Walleij 				res = d40_channel_execute_command(d40c,
14978d318a50SLinus Walleij 								  D40_DMA_RUN);
14988d318a50SLinus Walleij 				if (res) {
14998d318a50SLinus Walleij 					dev_err(&d40c->chan.dev->device,
15008d318a50SLinus Walleij 						"[%s] Executing RUN command\n",
15018d318a50SLinus Walleij 						__func__);
15028d318a50SLinus Walleij 					return res;
15038d318a50SLinus Walleij 				}
15048d318a50SLinus Walleij 			}
15058d318a50SLinus Walleij 			return 0;
15068d318a50SLinus Walleij 		}
1507d181b3a8SJonas Aaberg 	} else {
1508d181b3a8SJonas Aaberg 		(void) d40_alloc_mask_free(phy, is_src, 0);
1509d181b3a8SJonas Aaberg 	}
15108d318a50SLinus Walleij 
15118d318a50SLinus Walleij 	/* Release physical channel */
15128d318a50SLinus Walleij 	res = d40_channel_execute_command(d40c, D40_DMA_STOP);
15138d318a50SLinus Walleij 	if (res) {
15148d318a50SLinus Walleij 		dev_err(&d40c->chan.dev->device,
15158d318a50SLinus Walleij 			"[%s] Failed to stop channel\n", __func__);
15168d318a50SLinus Walleij 		return res;
15178d318a50SLinus Walleij 	}
15188d318a50SLinus Walleij 	d40c->phy_chan = NULL;
1519ce2ca125SRabin Vincent 	d40c->configured = false;
15208d318a50SLinus Walleij 	d40c->base->lookup_phy_chans[phy->num] = NULL;
15218d318a50SLinus Walleij 
15228d318a50SLinus Walleij 	return 0;
15238d318a50SLinus Walleij }
15248d318a50SLinus Walleij 
1525a5ebca47SJonas Aaberg static bool d40_is_paused(struct d40_chan *d40c)
1526a5ebca47SJonas Aaberg {
1527a5ebca47SJonas Aaberg 	bool is_paused = false;
1528a5ebca47SJonas Aaberg 	unsigned long flags;
1529a5ebca47SJonas Aaberg 	void __iomem *active_reg;
1530a5ebca47SJonas Aaberg 	u32 status;
1531a5ebca47SJonas Aaberg 	u32 event;
1532a5ebca47SJonas Aaberg 
1533a5ebca47SJonas Aaberg 	spin_lock_irqsave(&d40c->lock, flags);
1534a5ebca47SJonas Aaberg 
1535a5ebca47SJonas Aaberg 	if (d40c->log_num == D40_PHY_CHAN) {
1536a5ebca47SJonas Aaberg 		if (d40c->phy_chan->num % 2 == 0)
1537a5ebca47SJonas Aaberg 			active_reg = d40c->base->virtbase + D40_DREG_ACTIVE;
1538a5ebca47SJonas Aaberg 		else
1539a5ebca47SJonas Aaberg 			active_reg = d40c->base->virtbase + D40_DREG_ACTIVO;
1540a5ebca47SJonas Aaberg 
1541a5ebca47SJonas Aaberg 		status = (readl(active_reg) &
1542a5ebca47SJonas Aaberg 			  D40_CHAN_POS_MASK(d40c->phy_chan->num)) >>
1543a5ebca47SJonas Aaberg 			D40_CHAN_POS(d40c->phy_chan->num);
1544a5ebca47SJonas Aaberg 		if (status == D40_DMA_SUSPENDED || status == D40_DMA_STOP)
1545a5ebca47SJonas Aaberg 			is_paused = true;
1546a5ebca47SJonas Aaberg 
1547a5ebca47SJonas Aaberg 		goto _exit;
1548a5ebca47SJonas Aaberg 	}
1549a5ebca47SJonas Aaberg 
1550a5ebca47SJonas Aaberg 	if (d40c->dma_cfg.dir == STEDMA40_MEM_TO_PERIPH ||
15519dbfbd35SJonas Aaberg 	    d40c->dma_cfg.dir == STEDMA40_MEM_TO_MEM) {
1552a5ebca47SJonas Aaberg 		event = D40_TYPE_TO_EVENT(d40c->dma_cfg.dst_dev_type);
15539dbfbd35SJonas Aaberg 		status = readl(d40c->base->virtbase + D40_DREG_PCBASE +
15549dbfbd35SJonas Aaberg 			       d40c->phy_chan->num * D40_DREG_PCDELTA +
15559dbfbd35SJonas Aaberg 			       D40_CHAN_REG_SDLNK);
15569dbfbd35SJonas Aaberg 	} else if (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_MEM) {
1557a5ebca47SJonas Aaberg 		event = D40_TYPE_TO_EVENT(d40c->dma_cfg.src_dev_type);
15589dbfbd35SJonas Aaberg 		status = readl(d40c->base->virtbase + D40_DREG_PCBASE +
15599dbfbd35SJonas Aaberg 			       d40c->phy_chan->num * D40_DREG_PCDELTA +
15609dbfbd35SJonas Aaberg 			       D40_CHAN_REG_SSLNK);
15619dbfbd35SJonas Aaberg 	} else {
1562a5ebca47SJonas Aaberg 		dev_err(&d40c->chan.dev->device,
1563a5ebca47SJonas Aaberg 			"[%s] Unknown direction\n", __func__);
1564a5ebca47SJonas Aaberg 		goto _exit;
1565a5ebca47SJonas Aaberg 	}
15669dbfbd35SJonas Aaberg 
1567a5ebca47SJonas Aaberg 	status = (status & D40_EVENTLINE_MASK(event)) >>
1568a5ebca47SJonas Aaberg 		D40_EVENTLINE_POS(event);
1569a5ebca47SJonas Aaberg 
1570a5ebca47SJonas Aaberg 	if (status != D40_DMA_RUN)
1571a5ebca47SJonas Aaberg 		is_paused = true;
1572a5ebca47SJonas Aaberg _exit:
1573a5ebca47SJonas Aaberg 	spin_unlock_irqrestore(&d40c->lock, flags);
1574a5ebca47SJonas Aaberg 	return is_paused;
1575a5ebca47SJonas Aaberg 
1576a5ebca47SJonas Aaberg }
1577a5ebca47SJonas Aaberg 
1578a5ebca47SJonas Aaberg 
15798d318a50SLinus Walleij static u32 stedma40_residue(struct dma_chan *chan)
15808d318a50SLinus Walleij {
15818d318a50SLinus Walleij 	struct d40_chan *d40c =
15828d318a50SLinus Walleij 		container_of(chan, struct d40_chan, chan);
15838d318a50SLinus Walleij 	u32 bytes_left;
15848d318a50SLinus Walleij 	unsigned long flags;
15858d318a50SLinus Walleij 
15868d318a50SLinus Walleij 	spin_lock_irqsave(&d40c->lock, flags);
15878d318a50SLinus Walleij 	bytes_left = d40_residue(d40c);
15888d318a50SLinus Walleij 	spin_unlock_irqrestore(&d40c->lock, flags);
15898d318a50SLinus Walleij 
15908d318a50SLinus Walleij 	return bytes_left;
15918d318a50SLinus Walleij }
15928d318a50SLinus Walleij 
15938d318a50SLinus Walleij struct dma_async_tx_descriptor *stedma40_memcpy_sg(struct dma_chan *chan,
15948d318a50SLinus Walleij 						   struct scatterlist *sgl_dst,
15958d318a50SLinus Walleij 						   struct scatterlist *sgl_src,
15968d318a50SLinus Walleij 						   unsigned int sgl_len,
15972a614340SJonas Aaberg 						   unsigned long dma_flags)
15988d318a50SLinus Walleij {
15998d318a50SLinus Walleij 	int res;
16008d318a50SLinus Walleij 	struct d40_desc *d40d;
16018d318a50SLinus Walleij 	struct d40_chan *d40c = container_of(chan, struct d40_chan,
16028d318a50SLinus Walleij 					     chan);
16032a614340SJonas Aaberg 	unsigned long flags;
16048d318a50SLinus Walleij 
16050d0f6b8bSJonas Aaberg 	if (d40c->phy_chan == NULL) {
16060d0f6b8bSJonas Aaberg 		dev_err(&d40c->chan.dev->device,
16070d0f6b8bSJonas Aaberg 			"[%s] Unallocated channel.\n", __func__);
16080d0f6b8bSJonas Aaberg 		return ERR_PTR(-EINVAL);
16090d0f6b8bSJonas Aaberg 	}
16100d0f6b8bSJonas Aaberg 
16112a614340SJonas Aaberg 	spin_lock_irqsave(&d40c->lock, flags);
16128d318a50SLinus Walleij 	d40d = d40_desc_get(d40c);
16138d318a50SLinus Walleij 
16148d318a50SLinus Walleij 	if (d40d == NULL)
16158d318a50SLinus Walleij 		goto err;
16168d318a50SLinus Walleij 
16178d318a50SLinus Walleij 	d40d->lli_len = sgl_len;
1618698e4732SJonas Aaberg 	d40d->lli_current = 0;
16192a614340SJonas Aaberg 	d40d->txd.flags = dma_flags;
16208d318a50SLinus Walleij 
16218d318a50SLinus Walleij 	if (d40c->log_num != D40_PHY_CHAN) {
16228d318a50SLinus Walleij 
16238d318a50SLinus Walleij 		if (d40_pool_lli_alloc(d40d, sgl_len, true) < 0) {
16248d318a50SLinus Walleij 			dev_err(&d40c->chan.dev->device,
16258d318a50SLinus Walleij 				"[%s] Out of memory\n", __func__);
16268d318a50SLinus Walleij 			goto err;
16278d318a50SLinus Walleij 		}
16288d318a50SLinus Walleij 
1629698e4732SJonas Aaberg 		(void) d40_log_sg_to_lli(sgl_src,
16308d318a50SLinus Walleij 					 sgl_len,
16318d318a50SLinus Walleij 					 d40d->lli_log.src,
16328d318a50SLinus Walleij 					 d40c->log_def.lcsp1,
1633698e4732SJonas Aaberg 					 d40c->dma_cfg.src_info.data_width);
16348d318a50SLinus Walleij 
1635698e4732SJonas Aaberg 		(void) d40_log_sg_to_lli(sgl_dst,
16368d318a50SLinus Walleij 					 sgl_len,
16378d318a50SLinus Walleij 					 d40d->lli_log.dst,
16388d318a50SLinus Walleij 					 d40c->log_def.lcsp3,
1639698e4732SJonas Aaberg 					 d40c->dma_cfg.dst_info.data_width);
16408d318a50SLinus Walleij 	} else {
16418d318a50SLinus Walleij 		if (d40_pool_lli_alloc(d40d, sgl_len, false) < 0) {
16428d318a50SLinus Walleij 			dev_err(&d40c->chan.dev->device,
16438d318a50SLinus Walleij 				"[%s] Out of memory\n", __func__);
16448d318a50SLinus Walleij 			goto err;
16458d318a50SLinus Walleij 		}
16468d318a50SLinus Walleij 
16478d318a50SLinus Walleij 		res = d40_phy_sg_to_lli(sgl_src,
16488d318a50SLinus Walleij 					sgl_len,
16498d318a50SLinus Walleij 					0,
16508d318a50SLinus Walleij 					d40d->lli_phy.src,
1651aa182ae2SJonas Aaberg 					virt_to_phys(d40d->lli_phy.src),
16528d318a50SLinus Walleij 					d40c->src_def_cfg,
16538d318a50SLinus Walleij 					d40c->dma_cfg.src_info.data_width,
16540246e77bSJonas Aaberg 					d40c->dma_cfg.src_info.psize);
16558d318a50SLinus Walleij 
16568d318a50SLinus Walleij 		if (res < 0)
16578d318a50SLinus Walleij 			goto err;
16588d318a50SLinus Walleij 
16598d318a50SLinus Walleij 		res = d40_phy_sg_to_lli(sgl_dst,
16608d318a50SLinus Walleij 					sgl_len,
16618d318a50SLinus Walleij 					0,
16628d318a50SLinus Walleij 					d40d->lli_phy.dst,
1663aa182ae2SJonas Aaberg 					virt_to_phys(d40d->lli_phy.dst),
16648d318a50SLinus Walleij 					d40c->dst_def_cfg,
16658d318a50SLinus Walleij 					d40c->dma_cfg.dst_info.data_width,
16660246e77bSJonas Aaberg 					d40c->dma_cfg.dst_info.psize);
16678d318a50SLinus Walleij 
16688d318a50SLinus Walleij 		if (res < 0)
16698d318a50SLinus Walleij 			goto err;
16708d318a50SLinus Walleij 
16718d318a50SLinus Walleij 		(void) dma_map_single(d40c->base->dev, d40d->lli_phy.src,
16728d318a50SLinus Walleij 				      d40d->lli_pool.size, DMA_TO_DEVICE);
16738d318a50SLinus Walleij 	}
16748d318a50SLinus Walleij 
16758d318a50SLinus Walleij 	dma_async_tx_descriptor_init(&d40d->txd, chan);
16768d318a50SLinus Walleij 
16778d318a50SLinus Walleij 	d40d->txd.tx_submit = d40_tx_submit;
16788d318a50SLinus Walleij 
16792a614340SJonas Aaberg 	spin_unlock_irqrestore(&d40c->lock, flags);
16808d318a50SLinus Walleij 
16818d318a50SLinus Walleij 	return &d40d->txd;
16828d318a50SLinus Walleij err:
1683819504f4SRabin Vincent 	if (d40d)
1684819504f4SRabin Vincent 		d40_desc_free(d40c, d40d);
16852a614340SJonas Aaberg 	spin_unlock_irqrestore(&d40c->lock, flags);
16868d318a50SLinus Walleij 	return NULL;
16878d318a50SLinus Walleij }
16888d318a50SLinus Walleij EXPORT_SYMBOL(stedma40_memcpy_sg);
16898d318a50SLinus Walleij 
16908d318a50SLinus Walleij bool stedma40_filter(struct dma_chan *chan, void *data)
16918d318a50SLinus Walleij {
16928d318a50SLinus Walleij 	struct stedma40_chan_cfg *info = data;
16938d318a50SLinus Walleij 	struct d40_chan *d40c =
16948d318a50SLinus Walleij 		container_of(chan, struct d40_chan, chan);
16958d318a50SLinus Walleij 	int err;
16968d318a50SLinus Walleij 
16978d318a50SLinus Walleij 	if (data) {
16988d318a50SLinus Walleij 		err = d40_validate_conf(d40c, info);
16998d318a50SLinus Walleij 		if (!err)
17008d318a50SLinus Walleij 			d40c->dma_cfg = *info;
17018d318a50SLinus Walleij 	} else
17028d318a50SLinus Walleij 		err = d40_config_memcpy(d40c);
17038d318a50SLinus Walleij 
1704ce2ca125SRabin Vincent 	if (!err)
1705ce2ca125SRabin Vincent 		d40c->configured = true;
1706ce2ca125SRabin Vincent 
17078d318a50SLinus Walleij 	return err == 0;
17088d318a50SLinus Walleij }
17098d318a50SLinus Walleij EXPORT_SYMBOL(stedma40_filter);
17108d318a50SLinus Walleij 
17118d318a50SLinus Walleij /* DMA ENGINE functions */
17128d318a50SLinus Walleij static int d40_alloc_chan_resources(struct dma_chan *chan)
17138d318a50SLinus Walleij {
17148d318a50SLinus Walleij 	int err;
17158d318a50SLinus Walleij 	unsigned long flags;
17168d318a50SLinus Walleij 	struct d40_chan *d40c =
17178d318a50SLinus Walleij 		container_of(chan, struct d40_chan, chan);
1718ef1872ecSLinus Walleij 	bool is_free_phy;
17198d318a50SLinus Walleij 	spin_lock_irqsave(&d40c->lock, flags);
17208d318a50SLinus Walleij 
17218d318a50SLinus Walleij 	d40c->completed = chan->cookie = 1;
17228d318a50SLinus Walleij 
1723ce2ca125SRabin Vincent 	/* If no dma configuration is set use default configuration (memcpy) */
1724ce2ca125SRabin Vincent 	if (!d40c->configured) {
17258d318a50SLinus Walleij 		err = d40_config_memcpy(d40c);
1726ff0b12baSJonas Aaberg 		if (err) {
1727ff0b12baSJonas Aaberg 			dev_err(&d40c->chan.dev->device,
1728ff0b12baSJonas Aaberg 				"[%s] Failed to configure memcpy channel\n",
1729ff0b12baSJonas Aaberg 				__func__);
1730ff0b12baSJonas Aaberg 			goto fail;
1731ff0b12baSJonas Aaberg 		}
17328d318a50SLinus Walleij 	}
1733ef1872ecSLinus Walleij 	is_free_phy = (d40c->phy_chan == NULL);
17348d318a50SLinus Walleij 
17358d318a50SLinus Walleij 	err = d40_allocate_channel(d40c);
17368d318a50SLinus Walleij 	if (err) {
17378d318a50SLinus Walleij 		dev_err(&d40c->chan.dev->device,
17388d318a50SLinus Walleij 			"[%s] Failed to allocate channel\n", __func__);
1739ff0b12baSJonas Aaberg 		goto fail;
17408d318a50SLinus Walleij 	}
17418d318a50SLinus Walleij 
1742ef1872ecSLinus Walleij 	/* Fill in basic CFG register values */
1743ef1872ecSLinus Walleij 	d40_phy_cfg(&d40c->dma_cfg, &d40c->src_def_cfg,
1744ef1872ecSLinus Walleij 		    &d40c->dst_def_cfg, d40c->log_num != D40_PHY_CHAN);
1745ef1872ecSLinus Walleij 
1746ef1872ecSLinus Walleij 	if (d40c->log_num != D40_PHY_CHAN) {
1747ef1872ecSLinus Walleij 		d40_log_cfg(&d40c->dma_cfg,
1748ef1872ecSLinus Walleij 			    &d40c->log_def.lcsp1, &d40c->log_def.lcsp3);
1749ef1872ecSLinus Walleij 
1750ef1872ecSLinus Walleij 		if (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_MEM)
1751ef1872ecSLinus Walleij 			d40c->lcpa = d40c->base->lcpa_base +
1752ef1872ecSLinus Walleij 			  d40c->dma_cfg.src_dev_type * D40_LCPA_CHAN_SIZE;
1753ef1872ecSLinus Walleij 		else
1754ef1872ecSLinus Walleij 			d40c->lcpa = d40c->base->lcpa_base +
1755ef1872ecSLinus Walleij 			  d40c->dma_cfg.dst_dev_type *
1756ef1872ecSLinus Walleij 			  D40_LCPA_CHAN_SIZE + D40_LCPA_CHAN_DST_DELTA;
1757ef1872ecSLinus Walleij 	}
1758ef1872ecSLinus Walleij 
1759ef1872ecSLinus Walleij 	/*
1760ef1872ecSLinus Walleij 	 * Only write channel configuration to the DMA if the physical
1761ef1872ecSLinus Walleij 	 * resource is free. In case of multiple logical channels
1762ef1872ecSLinus Walleij 	 * on the same physical resource, only the first write is necessary.
1763ef1872ecSLinus Walleij 	 */
1764b55912c6SJonas Aaberg 	if (is_free_phy)
1765b55912c6SJonas Aaberg 		d40_config_write(d40c);
1766ff0b12baSJonas Aaberg fail:
17678d318a50SLinus Walleij 	spin_unlock_irqrestore(&d40c->lock, flags);
1768ff0b12baSJonas Aaberg 	return err;
17698d318a50SLinus Walleij }
17708d318a50SLinus Walleij 
17718d318a50SLinus Walleij static void d40_free_chan_resources(struct dma_chan *chan)
17728d318a50SLinus Walleij {
17738d318a50SLinus Walleij 	struct d40_chan *d40c =
17748d318a50SLinus Walleij 		container_of(chan, struct d40_chan, chan);
17758d318a50SLinus Walleij 	int err;
17768d318a50SLinus Walleij 	unsigned long flags;
17778d318a50SLinus Walleij 
17780d0f6b8bSJonas Aaberg 	if (d40c->phy_chan == NULL) {
17790d0f6b8bSJonas Aaberg 		dev_err(&d40c->chan.dev->device,
17800d0f6b8bSJonas Aaberg 			"[%s] Cannot free unallocated channel\n", __func__);
17810d0f6b8bSJonas Aaberg 		return;
17820d0f6b8bSJonas Aaberg 	}
17830d0f6b8bSJonas Aaberg 
17840d0f6b8bSJonas Aaberg 
17858d318a50SLinus Walleij 	spin_lock_irqsave(&d40c->lock, flags);
17868d318a50SLinus Walleij 
17878d318a50SLinus Walleij 	err = d40_free_dma(d40c);
17888d318a50SLinus Walleij 
17898d318a50SLinus Walleij 	if (err)
17908d318a50SLinus Walleij 		dev_err(&d40c->chan.dev->device,
17918d318a50SLinus Walleij 			"[%s] Failed to free channel\n", __func__);
17928d318a50SLinus Walleij 	spin_unlock_irqrestore(&d40c->lock, flags);
17938d318a50SLinus Walleij }
17948d318a50SLinus Walleij 
17958d318a50SLinus Walleij static struct dma_async_tx_descriptor *d40_prep_memcpy(struct dma_chan *chan,
17968d318a50SLinus Walleij 						       dma_addr_t dst,
17978d318a50SLinus Walleij 						       dma_addr_t src,
17988d318a50SLinus Walleij 						       size_t size,
17992a614340SJonas Aaberg 						       unsigned long dma_flags)
18008d318a50SLinus Walleij {
18018d318a50SLinus Walleij 	struct d40_desc *d40d;
18028d318a50SLinus Walleij 	struct d40_chan *d40c = container_of(chan, struct d40_chan,
18038d318a50SLinus Walleij 					     chan);
18042a614340SJonas Aaberg 	unsigned long flags;
18058d318a50SLinus Walleij 	int err = 0;
18068d318a50SLinus Walleij 
18070d0f6b8bSJonas Aaberg 	if (d40c->phy_chan == NULL) {
18080d0f6b8bSJonas Aaberg 		dev_err(&d40c->chan.dev->device,
18090d0f6b8bSJonas Aaberg 			"[%s] Channel is not allocated.\n", __func__);
18100d0f6b8bSJonas Aaberg 		return ERR_PTR(-EINVAL);
18110d0f6b8bSJonas Aaberg 	}
18120d0f6b8bSJonas Aaberg 
18132a614340SJonas Aaberg 	spin_lock_irqsave(&d40c->lock, flags);
18148d318a50SLinus Walleij 	d40d = d40_desc_get(d40c);
18158d318a50SLinus Walleij 
18168d318a50SLinus Walleij 	if (d40d == NULL) {
18178d318a50SLinus Walleij 		dev_err(&d40c->chan.dev->device,
18188d318a50SLinus Walleij 			"[%s] Descriptor is NULL\n", __func__);
18198d318a50SLinus Walleij 		goto err;
18208d318a50SLinus Walleij 	}
18218d318a50SLinus Walleij 
18222a614340SJonas Aaberg 	d40d->txd.flags = dma_flags;
18238d318a50SLinus Walleij 
18248d318a50SLinus Walleij 	dma_async_tx_descriptor_init(&d40d->txd, chan);
18258d318a50SLinus Walleij 
18268d318a50SLinus Walleij 	d40d->txd.tx_submit = d40_tx_submit;
18278d318a50SLinus Walleij 
18288d318a50SLinus Walleij 	if (d40c->log_num != D40_PHY_CHAN) {
18298d318a50SLinus Walleij 
18308d318a50SLinus Walleij 		if (d40_pool_lli_alloc(d40d, 1, true) < 0) {
18318d318a50SLinus Walleij 			dev_err(&d40c->chan.dev->device,
18328d318a50SLinus Walleij 				"[%s] Out of memory\n", __func__);
18338d318a50SLinus Walleij 			goto err;
18348d318a50SLinus Walleij 		}
18358d318a50SLinus Walleij 		d40d->lli_len = 1;
1836698e4732SJonas Aaberg 		d40d->lli_current = 0;
18378d318a50SLinus Walleij 
18388d318a50SLinus Walleij 		d40_log_fill_lli(d40d->lli_log.src,
18398d318a50SLinus Walleij 				 src,
18408d318a50SLinus Walleij 				 size,
18418d318a50SLinus Walleij 				 d40c->log_def.lcsp1,
18428d318a50SLinus Walleij 				 d40c->dma_cfg.src_info.data_width,
1843698e4732SJonas Aaberg 				 true);
18448d318a50SLinus Walleij 
18458d318a50SLinus Walleij 		d40_log_fill_lli(d40d->lli_log.dst,
18468d318a50SLinus Walleij 				 dst,
18478d318a50SLinus Walleij 				 size,
18488d318a50SLinus Walleij 				 d40c->log_def.lcsp3,
18498d318a50SLinus Walleij 				 d40c->dma_cfg.dst_info.data_width,
1850698e4732SJonas Aaberg 				 true);
18518d318a50SLinus Walleij 
18528d318a50SLinus Walleij 	} else {
18538d318a50SLinus Walleij 
18548d318a50SLinus Walleij 		if (d40_pool_lli_alloc(d40d, 1, false) < 0) {
18558d318a50SLinus Walleij 			dev_err(&d40c->chan.dev->device,
18568d318a50SLinus Walleij 				"[%s] Out of memory\n", __func__);
18578d318a50SLinus Walleij 			goto err;
18588d318a50SLinus Walleij 		}
18598d318a50SLinus Walleij 
18608d318a50SLinus Walleij 		err = d40_phy_fill_lli(d40d->lli_phy.src,
18618d318a50SLinus Walleij 				       src,
18628d318a50SLinus Walleij 				       size,
18638d318a50SLinus Walleij 				       d40c->dma_cfg.src_info.psize,
18648d318a50SLinus Walleij 				       0,
18658d318a50SLinus Walleij 				       d40c->src_def_cfg,
18668d318a50SLinus Walleij 				       true,
18678d318a50SLinus Walleij 				       d40c->dma_cfg.src_info.data_width,
18688d318a50SLinus Walleij 				       false);
18698d318a50SLinus Walleij 		if (err)
18708d318a50SLinus Walleij 			goto err_fill_lli;
18718d318a50SLinus Walleij 
18728d318a50SLinus Walleij 		err = d40_phy_fill_lli(d40d->lli_phy.dst,
18738d318a50SLinus Walleij 				       dst,
18748d318a50SLinus Walleij 				       size,
18758d318a50SLinus Walleij 				       d40c->dma_cfg.dst_info.psize,
18768d318a50SLinus Walleij 				       0,
18778d318a50SLinus Walleij 				       d40c->dst_def_cfg,
18788d318a50SLinus Walleij 				       true,
18798d318a50SLinus Walleij 				       d40c->dma_cfg.dst_info.data_width,
18808d318a50SLinus Walleij 				       false);
18818d318a50SLinus Walleij 
18828d318a50SLinus Walleij 		if (err)
18838d318a50SLinus Walleij 			goto err_fill_lli;
18848d318a50SLinus Walleij 
18858d318a50SLinus Walleij 		(void) dma_map_single(d40c->base->dev, d40d->lli_phy.src,
18868d318a50SLinus Walleij 				      d40d->lli_pool.size, DMA_TO_DEVICE);
18878d318a50SLinus Walleij 	}
18888d318a50SLinus Walleij 
18892a614340SJonas Aaberg 	spin_unlock_irqrestore(&d40c->lock, flags);
18908d318a50SLinus Walleij 	return &d40d->txd;
18918d318a50SLinus Walleij 
18928d318a50SLinus Walleij err_fill_lli:
18938d318a50SLinus Walleij 	dev_err(&d40c->chan.dev->device,
18948d318a50SLinus Walleij 		"[%s] Failed filling in PHY LLI\n", __func__);
18958d318a50SLinus Walleij err:
1896819504f4SRabin Vincent 	if (d40d)
1897819504f4SRabin Vincent 		d40_desc_free(d40c, d40d);
18982a614340SJonas Aaberg 	spin_unlock_irqrestore(&d40c->lock, flags);
18998d318a50SLinus Walleij 	return NULL;
19008d318a50SLinus Walleij }
19018d318a50SLinus Walleij 
19028d318a50SLinus Walleij static int d40_prep_slave_sg_log(struct d40_desc *d40d,
19038d318a50SLinus Walleij 				 struct d40_chan *d40c,
19048d318a50SLinus Walleij 				 struct scatterlist *sgl,
19058d318a50SLinus Walleij 				 unsigned int sg_len,
19068d318a50SLinus Walleij 				 enum dma_data_direction direction,
19072a614340SJonas Aaberg 				 unsigned long dma_flags)
19088d318a50SLinus Walleij {
19098d318a50SLinus Walleij 	dma_addr_t dev_addr = 0;
19108d318a50SLinus Walleij 	int total_size;
19118d318a50SLinus Walleij 
19128d318a50SLinus Walleij 	if (d40_pool_lli_alloc(d40d, sg_len, true) < 0) {
19138d318a50SLinus Walleij 		dev_err(&d40c->chan.dev->device,
19148d318a50SLinus Walleij 			"[%s] Out of memory\n", __func__);
19158d318a50SLinus Walleij 		return -ENOMEM;
19168d318a50SLinus Walleij 	}
19178d318a50SLinus Walleij 
19188d318a50SLinus Walleij 	d40d->lli_len = sg_len;
1919698e4732SJonas Aaberg 	d40d->lli_current = 0;
19208d318a50SLinus Walleij 
19212a614340SJonas Aaberg 	if (direction == DMA_FROM_DEVICE)
192295e1400fSLinus Walleij 		if (d40c->runtime_addr)
192395e1400fSLinus Walleij 			dev_addr = d40c->runtime_addr;
192495e1400fSLinus Walleij 		else
19258d318a50SLinus Walleij 			dev_addr = d40c->base->plat_data->dev_rx[d40c->dma_cfg.src_dev_type];
19262a614340SJonas Aaberg 	else if (direction == DMA_TO_DEVICE)
192795e1400fSLinus Walleij 		if (d40c->runtime_addr)
192895e1400fSLinus Walleij 			dev_addr = d40c->runtime_addr;
192995e1400fSLinus Walleij 		else
19308d318a50SLinus Walleij 			dev_addr = d40c->base->plat_data->dev_tx[d40c->dma_cfg.dst_dev_type];
193195e1400fSLinus Walleij 
19322a614340SJonas Aaberg 	else
19332a614340SJonas Aaberg 		return -EINVAL;
19342a614340SJonas Aaberg 
1935698e4732SJonas Aaberg 	total_size = d40_log_sg_to_dev(sgl, sg_len,
19368d318a50SLinus Walleij 				       &d40d->lli_log,
19378d318a50SLinus Walleij 				       &d40c->log_def,
19388d318a50SLinus Walleij 				       d40c->dma_cfg.src_info.data_width,
19398d318a50SLinus Walleij 				       d40c->dma_cfg.dst_info.data_width,
19408d318a50SLinus Walleij 				       direction,
1941698e4732SJonas Aaberg 				       dev_addr);
19422a614340SJonas Aaberg 
19438d318a50SLinus Walleij 	if (total_size < 0)
19448d318a50SLinus Walleij 		return -EINVAL;
19458d318a50SLinus Walleij 
19468d318a50SLinus Walleij 	return 0;
19478d318a50SLinus Walleij }
19488d318a50SLinus Walleij 
19498d318a50SLinus Walleij static int d40_prep_slave_sg_phy(struct d40_desc *d40d,
19508d318a50SLinus Walleij 				 struct d40_chan *d40c,
19518d318a50SLinus Walleij 				 struct scatterlist *sgl,
19528d318a50SLinus Walleij 				 unsigned int sgl_len,
19538d318a50SLinus Walleij 				 enum dma_data_direction direction,
19542a614340SJonas Aaberg 				 unsigned long dma_flags)
19558d318a50SLinus Walleij {
19568d318a50SLinus Walleij 	dma_addr_t src_dev_addr;
19578d318a50SLinus Walleij 	dma_addr_t dst_dev_addr;
19588d318a50SLinus Walleij 	int res;
19598d318a50SLinus Walleij 
19608d318a50SLinus Walleij 	if (d40_pool_lli_alloc(d40d, sgl_len, false) < 0) {
19618d318a50SLinus Walleij 		dev_err(&d40c->chan.dev->device,
19628d318a50SLinus Walleij 			"[%s] Out of memory\n", __func__);
19638d318a50SLinus Walleij 		return -ENOMEM;
19648d318a50SLinus Walleij 	}
19658d318a50SLinus Walleij 
19668d318a50SLinus Walleij 	d40d->lli_len = sgl_len;
1967698e4732SJonas Aaberg 	d40d->lli_current = 0;
19688d318a50SLinus Walleij 
19698d318a50SLinus Walleij 	if (direction == DMA_FROM_DEVICE) {
19708d318a50SLinus Walleij 		dst_dev_addr = 0;
197195e1400fSLinus Walleij 		if (d40c->runtime_addr)
197295e1400fSLinus Walleij 			src_dev_addr = d40c->runtime_addr;
197395e1400fSLinus Walleij 		else
19748d318a50SLinus Walleij 			src_dev_addr = d40c->base->plat_data->dev_rx[d40c->dma_cfg.src_dev_type];
19758d318a50SLinus Walleij 	} else if (direction == DMA_TO_DEVICE) {
197695e1400fSLinus Walleij 		if (d40c->runtime_addr)
197795e1400fSLinus Walleij 			dst_dev_addr = d40c->runtime_addr;
197895e1400fSLinus Walleij 		else
19798d318a50SLinus Walleij 			dst_dev_addr = d40c->base->plat_data->dev_tx[d40c->dma_cfg.dst_dev_type];
19808d318a50SLinus Walleij 		src_dev_addr = 0;
19818d318a50SLinus Walleij 	} else
19828d318a50SLinus Walleij 		return -EINVAL;
19838d318a50SLinus Walleij 
19848d318a50SLinus Walleij 	res = d40_phy_sg_to_lli(sgl,
19858d318a50SLinus Walleij 				sgl_len,
19868d318a50SLinus Walleij 				src_dev_addr,
19878d318a50SLinus Walleij 				d40d->lli_phy.src,
1988aa182ae2SJonas Aaberg 				virt_to_phys(d40d->lli_phy.src),
19898d318a50SLinus Walleij 				d40c->src_def_cfg,
19908d318a50SLinus Walleij 				d40c->dma_cfg.src_info.data_width,
19910246e77bSJonas Aaberg 				d40c->dma_cfg.src_info.psize);
19928d318a50SLinus Walleij 	if (res < 0)
19938d318a50SLinus Walleij 		return res;
19948d318a50SLinus Walleij 
19958d318a50SLinus Walleij 	res = d40_phy_sg_to_lli(sgl,
19968d318a50SLinus Walleij 				sgl_len,
19978d318a50SLinus Walleij 				dst_dev_addr,
19988d318a50SLinus Walleij 				d40d->lli_phy.dst,
1999aa182ae2SJonas Aaberg 				virt_to_phys(d40d->lli_phy.dst),
20008d318a50SLinus Walleij 				d40c->dst_def_cfg,
20018d318a50SLinus Walleij 				d40c->dma_cfg.dst_info.data_width,
20020246e77bSJonas Aaberg 				d40c->dma_cfg.dst_info.psize);
20038d318a50SLinus Walleij 	if (res < 0)
20048d318a50SLinus Walleij 		return res;
20058d318a50SLinus Walleij 
20068d318a50SLinus Walleij 	(void) dma_map_single(d40c->base->dev, d40d->lli_phy.src,
20078d318a50SLinus Walleij 			      d40d->lli_pool.size, DMA_TO_DEVICE);
20088d318a50SLinus Walleij 	return 0;
20098d318a50SLinus Walleij }
20108d318a50SLinus Walleij 
20118d318a50SLinus Walleij static struct dma_async_tx_descriptor *d40_prep_slave_sg(struct dma_chan *chan,
20128d318a50SLinus Walleij 							 struct scatterlist *sgl,
20138d318a50SLinus Walleij 							 unsigned int sg_len,
20148d318a50SLinus Walleij 							 enum dma_data_direction direction,
20152a614340SJonas Aaberg 							 unsigned long dma_flags)
20168d318a50SLinus Walleij {
20178d318a50SLinus Walleij 	struct d40_desc *d40d;
20188d318a50SLinus Walleij 	struct d40_chan *d40c = container_of(chan, struct d40_chan,
20198d318a50SLinus Walleij 					     chan);
20202a614340SJonas Aaberg 	unsigned long flags;
20218d318a50SLinus Walleij 	int err;
20228d318a50SLinus Walleij 
20230d0f6b8bSJonas Aaberg 	if (d40c->phy_chan == NULL) {
20240d0f6b8bSJonas Aaberg 		dev_err(&d40c->chan.dev->device,
20250d0f6b8bSJonas Aaberg 			"[%s] Cannot prepare unallocated channel\n", __func__);
20260d0f6b8bSJonas Aaberg 		return ERR_PTR(-EINVAL);
20270d0f6b8bSJonas Aaberg 	}
20280d0f6b8bSJonas Aaberg 
20292a614340SJonas Aaberg 	spin_lock_irqsave(&d40c->lock, flags);
20308d318a50SLinus Walleij 	d40d = d40_desc_get(d40c);
20318d318a50SLinus Walleij 
20328d318a50SLinus Walleij 	if (d40d == NULL)
2033819504f4SRabin Vincent 		goto err;
20348d318a50SLinus Walleij 
20358d318a50SLinus Walleij 	if (d40c->log_num != D40_PHY_CHAN)
20368d318a50SLinus Walleij 		err = d40_prep_slave_sg_log(d40d, d40c, sgl, sg_len,
20372a614340SJonas Aaberg 					    direction, dma_flags);
20388d318a50SLinus Walleij 	else
20398d318a50SLinus Walleij 		err = d40_prep_slave_sg_phy(d40d, d40c, sgl, sg_len,
20402a614340SJonas Aaberg 					    direction, dma_flags);
20418d318a50SLinus Walleij 	if (err) {
20428d318a50SLinus Walleij 		dev_err(&d40c->chan.dev->device,
20438d318a50SLinus Walleij 			"[%s] Failed to prepare %s slave sg job: %d\n",
20448d318a50SLinus Walleij 			__func__,
20458d318a50SLinus Walleij 			d40c->log_num != D40_PHY_CHAN ? "log" : "phy", err);
2046819504f4SRabin Vincent 		goto err;
20478d318a50SLinus Walleij 	}
20488d318a50SLinus Walleij 
20492a614340SJonas Aaberg 	d40d->txd.flags = dma_flags;
20508d318a50SLinus Walleij 
20518d318a50SLinus Walleij 	dma_async_tx_descriptor_init(&d40d->txd, chan);
20528d318a50SLinus Walleij 
20538d318a50SLinus Walleij 	d40d->txd.tx_submit = d40_tx_submit;
20548d318a50SLinus Walleij 
2055819504f4SRabin Vincent 	spin_unlock_irqrestore(&d40c->lock, flags);
20568d318a50SLinus Walleij 	return &d40d->txd;
2057819504f4SRabin Vincent 
2058819504f4SRabin Vincent err:
2059819504f4SRabin Vincent 	if (d40d)
2060819504f4SRabin Vincent 		d40_desc_free(d40c, d40d);
2061819504f4SRabin Vincent 	spin_unlock_irqrestore(&d40c->lock, flags);
2062819504f4SRabin Vincent 	return NULL;
20638d318a50SLinus Walleij }
20648d318a50SLinus Walleij 
20658d318a50SLinus Walleij static enum dma_status d40_tx_status(struct dma_chan *chan,
20668d318a50SLinus Walleij 				     dma_cookie_t cookie,
20678d318a50SLinus Walleij 				     struct dma_tx_state *txstate)
20688d318a50SLinus Walleij {
20698d318a50SLinus Walleij 	struct d40_chan *d40c = container_of(chan, struct d40_chan, chan);
20708d318a50SLinus Walleij 	dma_cookie_t last_used;
20718d318a50SLinus Walleij 	dma_cookie_t last_complete;
20728d318a50SLinus Walleij 	int ret;
20738d318a50SLinus Walleij 
20740d0f6b8bSJonas Aaberg 	if (d40c->phy_chan == NULL) {
20750d0f6b8bSJonas Aaberg 		dev_err(&d40c->chan.dev->device,
20760d0f6b8bSJonas Aaberg 			"[%s] Cannot read status of unallocated channel\n",
20770d0f6b8bSJonas Aaberg 			__func__);
20780d0f6b8bSJonas Aaberg 		return -EINVAL;
20790d0f6b8bSJonas Aaberg 	}
20800d0f6b8bSJonas Aaberg 
20818d318a50SLinus Walleij 	last_complete = d40c->completed;
20828d318a50SLinus Walleij 	last_used = chan->cookie;
20838d318a50SLinus Walleij 
2084a5ebca47SJonas Aaberg 	if (d40_is_paused(d40c))
2085a5ebca47SJonas Aaberg 		ret = DMA_PAUSED;
2086a5ebca47SJonas Aaberg 	else
20878d318a50SLinus Walleij 		ret = dma_async_is_complete(cookie, last_complete, last_used);
20888d318a50SLinus Walleij 
2089a5ebca47SJonas Aaberg 	dma_set_tx_state(txstate, last_complete, last_used,
2090a5ebca47SJonas Aaberg 			 stedma40_residue(chan));
20918d318a50SLinus Walleij 
20928d318a50SLinus Walleij 	return ret;
20938d318a50SLinus Walleij }
20948d318a50SLinus Walleij 
20958d318a50SLinus Walleij static void d40_issue_pending(struct dma_chan *chan)
20968d318a50SLinus Walleij {
20978d318a50SLinus Walleij 	struct d40_chan *d40c = container_of(chan, struct d40_chan, chan);
20988d318a50SLinus Walleij 	unsigned long flags;
20998d318a50SLinus Walleij 
21000d0f6b8bSJonas Aaberg 	if (d40c->phy_chan == NULL) {
21010d0f6b8bSJonas Aaberg 		dev_err(&d40c->chan.dev->device,
21020d0f6b8bSJonas Aaberg 			"[%s] Channel is not allocated!\n", __func__);
21030d0f6b8bSJonas Aaberg 		return;
21040d0f6b8bSJonas Aaberg 	}
21050d0f6b8bSJonas Aaberg 
21068d318a50SLinus Walleij 	spin_lock_irqsave(&d40c->lock, flags);
21078d318a50SLinus Walleij 
21088d318a50SLinus Walleij 	/* Busy means that pending jobs are already being processed */
21098d318a50SLinus Walleij 	if (!d40c->busy)
21108d318a50SLinus Walleij 		(void) d40_queue_start(d40c);
21118d318a50SLinus Walleij 
21128d318a50SLinus Walleij 	spin_unlock_irqrestore(&d40c->lock, flags);
21138d318a50SLinus Walleij }
21148d318a50SLinus Walleij 
211595e1400fSLinus Walleij /* Runtime reconfiguration extension */
211695e1400fSLinus Walleij static void d40_set_runtime_config(struct dma_chan *chan,
211795e1400fSLinus Walleij 			       struct dma_slave_config *config)
211895e1400fSLinus Walleij {
211995e1400fSLinus Walleij 	struct d40_chan *d40c = container_of(chan, struct d40_chan, chan);
212095e1400fSLinus Walleij 	struct stedma40_chan_cfg *cfg = &d40c->dma_cfg;
212195e1400fSLinus Walleij 	enum dma_slave_buswidth config_addr_width;
212295e1400fSLinus Walleij 	dma_addr_t config_addr;
212395e1400fSLinus Walleij 	u32 config_maxburst;
212495e1400fSLinus Walleij 	enum stedma40_periph_data_width addr_width;
212595e1400fSLinus Walleij 	int psize;
212695e1400fSLinus Walleij 
212795e1400fSLinus Walleij 	if (config->direction == DMA_FROM_DEVICE) {
212895e1400fSLinus Walleij 		dma_addr_t dev_addr_rx =
212995e1400fSLinus Walleij 			d40c->base->plat_data->dev_rx[cfg->src_dev_type];
213095e1400fSLinus Walleij 
213195e1400fSLinus Walleij 		config_addr = config->src_addr;
213295e1400fSLinus Walleij 		if (dev_addr_rx)
213395e1400fSLinus Walleij 			dev_dbg(d40c->base->dev,
213495e1400fSLinus Walleij 				"channel has a pre-wired RX address %08x "
213595e1400fSLinus Walleij 				"overriding with %08x\n",
213695e1400fSLinus Walleij 				dev_addr_rx, config_addr);
213795e1400fSLinus Walleij 		if (cfg->dir != STEDMA40_PERIPH_TO_MEM)
213895e1400fSLinus Walleij 			dev_dbg(d40c->base->dev,
213995e1400fSLinus Walleij 				"channel was not configured for peripheral "
214095e1400fSLinus Walleij 				"to memory transfer (%d) overriding\n",
214195e1400fSLinus Walleij 				cfg->dir);
214295e1400fSLinus Walleij 		cfg->dir = STEDMA40_PERIPH_TO_MEM;
214395e1400fSLinus Walleij 
214495e1400fSLinus Walleij 		config_addr_width = config->src_addr_width;
214595e1400fSLinus Walleij 		config_maxburst = config->src_maxburst;
214695e1400fSLinus Walleij 
214795e1400fSLinus Walleij 	} else if (config->direction == DMA_TO_DEVICE) {
214895e1400fSLinus Walleij 		dma_addr_t dev_addr_tx =
214995e1400fSLinus Walleij 			d40c->base->plat_data->dev_tx[cfg->dst_dev_type];
215095e1400fSLinus Walleij 
215195e1400fSLinus Walleij 		config_addr = config->dst_addr;
215295e1400fSLinus Walleij 		if (dev_addr_tx)
215395e1400fSLinus Walleij 			dev_dbg(d40c->base->dev,
215495e1400fSLinus Walleij 				"channel has a pre-wired TX address %08x "
215595e1400fSLinus Walleij 				"overriding with %08x\n",
215695e1400fSLinus Walleij 				dev_addr_tx, config_addr);
215795e1400fSLinus Walleij 		if (cfg->dir != STEDMA40_MEM_TO_PERIPH)
215895e1400fSLinus Walleij 			dev_dbg(d40c->base->dev,
215995e1400fSLinus Walleij 				"channel was not configured for memory "
216095e1400fSLinus Walleij 				"to peripheral transfer (%d) overriding\n",
216195e1400fSLinus Walleij 				cfg->dir);
216295e1400fSLinus Walleij 		cfg->dir = STEDMA40_MEM_TO_PERIPH;
216395e1400fSLinus Walleij 
216495e1400fSLinus Walleij 		config_addr_width = config->dst_addr_width;
216595e1400fSLinus Walleij 		config_maxburst = config->dst_maxburst;
216695e1400fSLinus Walleij 
216795e1400fSLinus Walleij 	} else {
216895e1400fSLinus Walleij 		dev_err(d40c->base->dev,
216995e1400fSLinus Walleij 			"unrecognized channel direction %d\n",
217095e1400fSLinus Walleij 			config->direction);
217195e1400fSLinus Walleij 		return;
217295e1400fSLinus Walleij 	}
217395e1400fSLinus Walleij 
217495e1400fSLinus Walleij 	switch (config_addr_width) {
217595e1400fSLinus Walleij 	case DMA_SLAVE_BUSWIDTH_1_BYTE:
217695e1400fSLinus Walleij 		addr_width = STEDMA40_BYTE_WIDTH;
217795e1400fSLinus Walleij 		break;
217895e1400fSLinus Walleij 	case DMA_SLAVE_BUSWIDTH_2_BYTES:
217995e1400fSLinus Walleij 		addr_width = STEDMA40_HALFWORD_WIDTH;
218095e1400fSLinus Walleij 		break;
218195e1400fSLinus Walleij 	case DMA_SLAVE_BUSWIDTH_4_BYTES:
218295e1400fSLinus Walleij 		addr_width = STEDMA40_WORD_WIDTH;
218395e1400fSLinus Walleij 		break;
218495e1400fSLinus Walleij 	case DMA_SLAVE_BUSWIDTH_8_BYTES:
218595e1400fSLinus Walleij 		addr_width = STEDMA40_DOUBLEWORD_WIDTH;
218695e1400fSLinus Walleij 		break;
218795e1400fSLinus Walleij 	default:
218895e1400fSLinus Walleij 		dev_err(d40c->base->dev,
218995e1400fSLinus Walleij 			"illegal peripheral address width "
219095e1400fSLinus Walleij 			"requested (%d)\n",
219195e1400fSLinus Walleij 			config->src_addr_width);
219295e1400fSLinus Walleij 		return;
219395e1400fSLinus Walleij 	}
219495e1400fSLinus Walleij 
2195a59670a4SPer Forlin 	if (d40c->log_num != D40_PHY_CHAN) {
219695e1400fSLinus Walleij 		if (config_maxburst >= 16)
219795e1400fSLinus Walleij 			psize = STEDMA40_PSIZE_LOG_16;
219895e1400fSLinus Walleij 		else if (config_maxburst >= 8)
219995e1400fSLinus Walleij 			psize = STEDMA40_PSIZE_LOG_8;
220095e1400fSLinus Walleij 		else if (config_maxburst >= 4)
220195e1400fSLinus Walleij 			psize = STEDMA40_PSIZE_LOG_4;
220295e1400fSLinus Walleij 		else
220395e1400fSLinus Walleij 			psize = STEDMA40_PSIZE_LOG_1;
2204a59670a4SPer Forlin 	} else {
2205a59670a4SPer Forlin 		if (config_maxburst >= 16)
2206a59670a4SPer Forlin 			psize = STEDMA40_PSIZE_PHY_16;
2207a59670a4SPer Forlin 		else if (config_maxburst >= 8)
2208a59670a4SPer Forlin 			psize = STEDMA40_PSIZE_PHY_8;
2209a59670a4SPer Forlin 		else if (config_maxburst >= 4)
2210a59670a4SPer Forlin 			psize = STEDMA40_PSIZE_PHY_4;
2211a59670a4SPer Forlin 		else
2212a59670a4SPer Forlin 			psize = STEDMA40_PSIZE_PHY_1;
2213a59670a4SPer Forlin 	}
221495e1400fSLinus Walleij 
221595e1400fSLinus Walleij 	/* Set up all the endpoint configs */
221695e1400fSLinus Walleij 	cfg->src_info.data_width = addr_width;
221795e1400fSLinus Walleij 	cfg->src_info.psize = psize;
221895e1400fSLinus Walleij 	cfg->src_info.endianess = STEDMA40_LITTLE_ENDIAN;
221995e1400fSLinus Walleij 	cfg->src_info.flow_ctrl = STEDMA40_NO_FLOW_CTRL;
222095e1400fSLinus Walleij 	cfg->dst_info.data_width = addr_width;
222195e1400fSLinus Walleij 	cfg->dst_info.psize = psize;
222295e1400fSLinus Walleij 	cfg->dst_info.endianess = STEDMA40_LITTLE_ENDIAN;
222395e1400fSLinus Walleij 	cfg->dst_info.flow_ctrl = STEDMA40_NO_FLOW_CTRL;
222495e1400fSLinus Walleij 
2225a59670a4SPer Forlin 	/* Fill in register values */
2226a59670a4SPer Forlin 	if (d40c->log_num != D40_PHY_CHAN)
2227a59670a4SPer Forlin 		d40_log_cfg(cfg, &d40c->log_def.lcsp1, &d40c->log_def.lcsp3);
2228a59670a4SPer Forlin 	else
2229a59670a4SPer Forlin 		d40_phy_cfg(cfg, &d40c->src_def_cfg,
2230a59670a4SPer Forlin 			    &d40c->dst_def_cfg, false);
2231a59670a4SPer Forlin 
223295e1400fSLinus Walleij 	/* These settings will take precedence later */
223395e1400fSLinus Walleij 	d40c->runtime_addr = config_addr;
223495e1400fSLinus Walleij 	d40c->runtime_direction = config->direction;
223595e1400fSLinus Walleij 	dev_dbg(d40c->base->dev,
223695e1400fSLinus Walleij 		"configured channel %s for %s, data width %d, "
223795e1400fSLinus Walleij 		"maxburst %d bytes, LE, no flow control\n",
223895e1400fSLinus Walleij 		dma_chan_name(chan),
223995e1400fSLinus Walleij 		(config->direction == DMA_FROM_DEVICE) ? "RX" : "TX",
224095e1400fSLinus Walleij 		config_addr_width,
224195e1400fSLinus Walleij 		config_maxburst);
224295e1400fSLinus Walleij }
224395e1400fSLinus Walleij 
224405827630SLinus Walleij static int d40_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
224505827630SLinus Walleij 		       unsigned long arg)
22468d318a50SLinus Walleij {
22478d318a50SLinus Walleij 	unsigned long flags;
22488d318a50SLinus Walleij 	struct d40_chan *d40c = container_of(chan, struct d40_chan, chan);
22498d318a50SLinus Walleij 
22500d0f6b8bSJonas Aaberg 	if (d40c->phy_chan == NULL) {
22510d0f6b8bSJonas Aaberg 		dev_err(&d40c->chan.dev->device,
22520d0f6b8bSJonas Aaberg 			"[%s] Channel is not allocated!\n", __func__);
22530d0f6b8bSJonas Aaberg 		return -EINVAL;
22540d0f6b8bSJonas Aaberg 	}
22550d0f6b8bSJonas Aaberg 
22568d318a50SLinus Walleij 	switch (cmd) {
22578d318a50SLinus Walleij 	case DMA_TERMINATE_ALL:
22588d318a50SLinus Walleij 		spin_lock_irqsave(&d40c->lock, flags);
22598d318a50SLinus Walleij 		d40_term_all(d40c);
22608d318a50SLinus Walleij 		spin_unlock_irqrestore(&d40c->lock, flags);
22618d318a50SLinus Walleij 		return 0;
22628d318a50SLinus Walleij 	case DMA_PAUSE:
22638d318a50SLinus Walleij 		return d40_pause(chan);
22648d318a50SLinus Walleij 	case DMA_RESUME:
22658d318a50SLinus Walleij 		return d40_resume(chan);
226695e1400fSLinus Walleij 	case DMA_SLAVE_CONFIG:
226795e1400fSLinus Walleij 		d40_set_runtime_config(chan,
226895e1400fSLinus Walleij 			(struct dma_slave_config *) arg);
226995e1400fSLinus Walleij 		return 0;
227095e1400fSLinus Walleij 	default:
227195e1400fSLinus Walleij 		break;
22728d318a50SLinus Walleij 	}
22738d318a50SLinus Walleij 
22748d318a50SLinus Walleij 	/* Other commands are unimplemented */
22758d318a50SLinus Walleij 	return -ENXIO;
22768d318a50SLinus Walleij }
22778d318a50SLinus Walleij 
22788d318a50SLinus Walleij /* Initialization functions */
22798d318a50SLinus Walleij 
22808d318a50SLinus Walleij static void __init d40_chan_init(struct d40_base *base, struct dma_device *dma,
22818d318a50SLinus Walleij 				 struct d40_chan *chans, int offset,
22828d318a50SLinus Walleij 				 int num_chans)
22838d318a50SLinus Walleij {
22848d318a50SLinus Walleij 	int i = 0;
22858d318a50SLinus Walleij 	struct d40_chan *d40c;
22868d318a50SLinus Walleij 
22878d318a50SLinus Walleij 	INIT_LIST_HEAD(&dma->channels);
22888d318a50SLinus Walleij 
22898d318a50SLinus Walleij 	for (i = offset; i < offset + num_chans; i++) {
22908d318a50SLinus Walleij 		d40c = &chans[i];
22918d318a50SLinus Walleij 		d40c->base = base;
22928d318a50SLinus Walleij 		d40c->chan.device = dma;
22938d318a50SLinus Walleij 
22948d318a50SLinus Walleij 		spin_lock_init(&d40c->lock);
22958d318a50SLinus Walleij 
22968d318a50SLinus Walleij 		d40c->log_num = D40_PHY_CHAN;
22978d318a50SLinus Walleij 
22988d318a50SLinus Walleij 		INIT_LIST_HEAD(&d40c->active);
22998d318a50SLinus Walleij 		INIT_LIST_HEAD(&d40c->queue);
23008d318a50SLinus Walleij 		INIT_LIST_HEAD(&d40c->client);
23018d318a50SLinus Walleij 
23028d318a50SLinus Walleij 		tasklet_init(&d40c->tasklet, dma_tasklet,
23038d318a50SLinus Walleij 			     (unsigned long) d40c);
23048d318a50SLinus Walleij 
23058d318a50SLinus Walleij 		list_add_tail(&d40c->chan.device_node,
23068d318a50SLinus Walleij 			      &dma->channels);
23078d318a50SLinus Walleij 	}
23088d318a50SLinus Walleij }
23098d318a50SLinus Walleij 
23108d318a50SLinus Walleij static int __init d40_dmaengine_init(struct d40_base *base,
23118d318a50SLinus Walleij 				     int num_reserved_chans)
23128d318a50SLinus Walleij {
23138d318a50SLinus Walleij 	int err ;
23148d318a50SLinus Walleij 
23158d318a50SLinus Walleij 	d40_chan_init(base, &base->dma_slave, base->log_chans,
23168d318a50SLinus Walleij 		      0, base->num_log_chans);
23178d318a50SLinus Walleij 
23188d318a50SLinus Walleij 	dma_cap_zero(base->dma_slave.cap_mask);
23198d318a50SLinus Walleij 	dma_cap_set(DMA_SLAVE, base->dma_slave.cap_mask);
23208d318a50SLinus Walleij 
23218d318a50SLinus Walleij 	base->dma_slave.device_alloc_chan_resources = d40_alloc_chan_resources;
23228d318a50SLinus Walleij 	base->dma_slave.device_free_chan_resources = d40_free_chan_resources;
23238d318a50SLinus Walleij 	base->dma_slave.device_prep_dma_memcpy = d40_prep_memcpy;
23248d318a50SLinus Walleij 	base->dma_slave.device_prep_slave_sg = d40_prep_slave_sg;
23258d318a50SLinus Walleij 	base->dma_slave.device_tx_status = d40_tx_status;
23268d318a50SLinus Walleij 	base->dma_slave.device_issue_pending = d40_issue_pending;
23278d318a50SLinus Walleij 	base->dma_slave.device_control = d40_control;
23288d318a50SLinus Walleij 	base->dma_slave.dev = base->dev;
23298d318a50SLinus Walleij 
23308d318a50SLinus Walleij 	err = dma_async_device_register(&base->dma_slave);
23318d318a50SLinus Walleij 
23328d318a50SLinus Walleij 	if (err) {
23338d318a50SLinus Walleij 		dev_err(base->dev,
23348d318a50SLinus Walleij 			"[%s] Failed to register slave channels\n",
23358d318a50SLinus Walleij 			__func__);
23368d318a50SLinus Walleij 		goto failure1;
23378d318a50SLinus Walleij 	}
23388d318a50SLinus Walleij 
23398d318a50SLinus Walleij 	d40_chan_init(base, &base->dma_memcpy, base->log_chans,
23408d318a50SLinus Walleij 		      base->num_log_chans, base->plat_data->memcpy_len);
23418d318a50SLinus Walleij 
23428d318a50SLinus Walleij 	dma_cap_zero(base->dma_memcpy.cap_mask);
23438d318a50SLinus Walleij 	dma_cap_set(DMA_MEMCPY, base->dma_memcpy.cap_mask);
23448d318a50SLinus Walleij 
23458d318a50SLinus Walleij 	base->dma_memcpy.device_alloc_chan_resources = d40_alloc_chan_resources;
23468d318a50SLinus Walleij 	base->dma_memcpy.device_free_chan_resources = d40_free_chan_resources;
23478d318a50SLinus Walleij 	base->dma_memcpy.device_prep_dma_memcpy = d40_prep_memcpy;
23488d318a50SLinus Walleij 	base->dma_memcpy.device_prep_slave_sg = d40_prep_slave_sg;
23498d318a50SLinus Walleij 	base->dma_memcpy.device_tx_status = d40_tx_status;
23508d318a50SLinus Walleij 	base->dma_memcpy.device_issue_pending = d40_issue_pending;
23518d318a50SLinus Walleij 	base->dma_memcpy.device_control = d40_control;
23528d318a50SLinus Walleij 	base->dma_memcpy.dev = base->dev;
23538d318a50SLinus Walleij 	/*
23548d318a50SLinus Walleij 	 * This controller can only access address at even
23558d318a50SLinus Walleij 	 * 32bit boundaries, i.e. 2^2
23568d318a50SLinus Walleij 	 */
23578d318a50SLinus Walleij 	base->dma_memcpy.copy_align = 2;
23588d318a50SLinus Walleij 
23598d318a50SLinus Walleij 	err = dma_async_device_register(&base->dma_memcpy);
23608d318a50SLinus Walleij 
23618d318a50SLinus Walleij 	if (err) {
23628d318a50SLinus Walleij 		dev_err(base->dev,
23638d318a50SLinus Walleij 			"[%s] Failed to regsiter memcpy only channels\n",
23648d318a50SLinus Walleij 			__func__);
23658d318a50SLinus Walleij 		goto failure2;
23668d318a50SLinus Walleij 	}
23678d318a50SLinus Walleij 
23688d318a50SLinus Walleij 	d40_chan_init(base, &base->dma_both, base->phy_chans,
23698d318a50SLinus Walleij 		      0, num_reserved_chans);
23708d318a50SLinus Walleij 
23718d318a50SLinus Walleij 	dma_cap_zero(base->dma_both.cap_mask);
23728d318a50SLinus Walleij 	dma_cap_set(DMA_SLAVE, base->dma_both.cap_mask);
23738d318a50SLinus Walleij 	dma_cap_set(DMA_MEMCPY, base->dma_both.cap_mask);
23748d318a50SLinus Walleij 
23758d318a50SLinus Walleij 	base->dma_both.device_alloc_chan_resources = d40_alloc_chan_resources;
23768d318a50SLinus Walleij 	base->dma_both.device_free_chan_resources = d40_free_chan_resources;
23778d318a50SLinus Walleij 	base->dma_both.device_prep_dma_memcpy = d40_prep_memcpy;
23788d318a50SLinus Walleij 	base->dma_both.device_prep_slave_sg = d40_prep_slave_sg;
23798d318a50SLinus Walleij 	base->dma_both.device_tx_status = d40_tx_status;
23808d318a50SLinus Walleij 	base->dma_both.device_issue_pending = d40_issue_pending;
23818d318a50SLinus Walleij 	base->dma_both.device_control = d40_control;
23828d318a50SLinus Walleij 	base->dma_both.dev = base->dev;
23838d318a50SLinus Walleij 	base->dma_both.copy_align = 2;
23848d318a50SLinus Walleij 	err = dma_async_device_register(&base->dma_both);
23858d318a50SLinus Walleij 
23868d318a50SLinus Walleij 	if (err) {
23878d318a50SLinus Walleij 		dev_err(base->dev,
23888d318a50SLinus Walleij 			"[%s] Failed to register logical and physical capable channels\n",
23898d318a50SLinus Walleij 			__func__);
23908d318a50SLinus Walleij 		goto failure3;
23918d318a50SLinus Walleij 	}
23928d318a50SLinus Walleij 	return 0;
23938d318a50SLinus Walleij failure3:
23948d318a50SLinus Walleij 	dma_async_device_unregister(&base->dma_memcpy);
23958d318a50SLinus Walleij failure2:
23968d318a50SLinus Walleij 	dma_async_device_unregister(&base->dma_slave);
23978d318a50SLinus Walleij failure1:
23988d318a50SLinus Walleij 	return err;
23998d318a50SLinus Walleij }
24008d318a50SLinus Walleij 
24018d318a50SLinus Walleij /* Initialization functions. */
24028d318a50SLinus Walleij 
/*
 * Scan the controller's physical channel resources and initialize
 * base->phy_res[] accordingly.
 *
 * Channels flagged in the PRSME/PRSMO registers (value 1 in their
 * 2-bit field) or listed as disabled in the platform data are marked
 * permanently allocated so the driver never hands them out.
 *
 * Returns the number of usable physical channels.
 */
static int __init d40_phy_res_init(struct d40_base *base)
{
	int i;
	int num_phy_chans_avail = 0;
	u32 val[2];
	/* Bit position of the current channel's 2-bit field; starts at -2
	 * so the first even iteration brings it to 0. */
	int odd_even_bit = -2;

	/* Even-numbered channels live in PRSME, odd-numbered in PRSMO
	 * (presumably "security mode" registers - see the check below). */
	val[0] = readl(base->virtbase + D40_DREG_PRSME);
	val[1] = readl(base->virtbase + D40_DREG_PRSMO);

	for (i = 0; i < base->num_phy_chans; i++) {
		base->phy_res[i].num = i;
		/* Advance by one 2-bit field every second channel, since
		 * even/odd channels come from different registers. */
		odd_even_bit += 2 * ((i % 2) == 0);
		if (((val[i % 2] >> odd_even_bit) & 3) == 1) {
			/* Mark security only channels as occupied */
			base->phy_res[i].allocated_src = D40_ALLOC_PHY;
			base->phy_res[i].allocated_dst = D40_ALLOC_PHY;
		} else {
			base->phy_res[i].allocated_src = D40_ALLOC_FREE;
			base->phy_res[i].allocated_dst = D40_ALLOC_FREE;
			num_phy_chans_avail++;
		}
		spin_lock_init(&base->phy_res[i].lock);
	}

	/* Mark disabled channels as occupied */
	for (i = 0; base->plat_data->disabled_channels[i] != -1; i++) {
		int chan = base->plat_data->disabled_channels[i];

		base->phy_res[chan].allocated_src = D40_ALLOC_PHY;
		base->phy_res[chan].allocated_dst = D40_ALLOC_PHY;
		/* Disabled channels were counted free above; undo that. */
		num_phy_chans_avail--;
	}

	dev_info(base->dev, "%d of %d physical DMA channels available\n",
		 num_phy_chans_avail, base->num_phy_chans);

	/* Verify settings extended vs standard */
	val[0] = readl(base->virtbase + D40_DREG_PRTYP);

	/* PRTYP holds 2 bits of channel type per channel; a free channel
	 * whose type field is not 1 is reported as misconfigured. */
	for (i = 0; i < base->num_phy_chans; i++) {

		if (base->phy_res[i].allocated_src == D40_ALLOC_FREE &&
		    (val[0] & 0x3) != 1)
			dev_info(base->dev,
				 "[%s] INFO: channel %d is misconfigured (%d)\n",
				 __func__, i, val[0] & 0x3);

		val[0] = val[0] >> 2;
	}

	return num_phy_chans_avail;
}
24568d318a50SLinus Walleij 
24578d318a50SLinus Walleij static struct d40_base * __init d40_hw_detect_init(struct platform_device *pdev)
24588d318a50SLinus Walleij {
24598d318a50SLinus Walleij 	static const struct d40_reg_val dma_id_regs[] = {
24608d318a50SLinus Walleij 		/* Peripheral Id */
24618d318a50SLinus Walleij 		{ .reg = D40_DREG_PERIPHID0, .val = 0x0040},
24628d318a50SLinus Walleij 		{ .reg = D40_DREG_PERIPHID1, .val = 0x0000},
24638d318a50SLinus Walleij 		/*
24648d318a50SLinus Walleij 		 * D40_DREG_PERIPHID2 Depends on HW revision:
24658d318a50SLinus Walleij 		 *  MOP500/HREF ED has 0x0008,
24668d318a50SLinus Walleij 		 *  ? has 0x0018,
24678d318a50SLinus Walleij 		 *  HREF V1 has 0x0028
24688d318a50SLinus Walleij 		 */
24698d318a50SLinus Walleij 		{ .reg = D40_DREG_PERIPHID3, .val = 0x0000},
24708d318a50SLinus Walleij 
24718d318a50SLinus Walleij 		/* PCell Id */
24728d318a50SLinus Walleij 		{ .reg = D40_DREG_CELLID0, .val = 0x000d},
24738d318a50SLinus Walleij 		{ .reg = D40_DREG_CELLID1, .val = 0x00f0},
24748d318a50SLinus Walleij 		{ .reg = D40_DREG_CELLID2, .val = 0x0005},
24758d318a50SLinus Walleij 		{ .reg = D40_DREG_CELLID3, .val = 0x00b1}
24768d318a50SLinus Walleij 	};
24778d318a50SLinus Walleij 	struct stedma40_platform_data *plat_data;
24788d318a50SLinus Walleij 	struct clk *clk = NULL;
24798d318a50SLinus Walleij 	void __iomem *virtbase = NULL;
24808d318a50SLinus Walleij 	struct resource *res = NULL;
24818d318a50SLinus Walleij 	struct d40_base *base = NULL;
24828d318a50SLinus Walleij 	int num_log_chans = 0;
24838d318a50SLinus Walleij 	int num_phy_chans;
24848d318a50SLinus Walleij 	int i;
2485f4185592SLinus Walleij 	u32 val;
24863ae0267fSJonas Aaberg 	u32 rev;
24878d318a50SLinus Walleij 
24888d318a50SLinus Walleij 	clk = clk_get(&pdev->dev, NULL);
24898d318a50SLinus Walleij 
24908d318a50SLinus Walleij 	if (IS_ERR(clk)) {
24918d318a50SLinus Walleij 		dev_err(&pdev->dev, "[%s] No matching clock found\n",
24928d318a50SLinus Walleij 			__func__);
24938d318a50SLinus Walleij 		goto failure;
24948d318a50SLinus Walleij 	}
24958d318a50SLinus Walleij 
24968d318a50SLinus Walleij 	clk_enable(clk);
24978d318a50SLinus Walleij 
24988d318a50SLinus Walleij 	/* Get IO for DMAC base address */
24998d318a50SLinus Walleij 	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "base");
25008d318a50SLinus Walleij 	if (!res)
25018d318a50SLinus Walleij 		goto failure;
25028d318a50SLinus Walleij 
25038d318a50SLinus Walleij 	if (request_mem_region(res->start, resource_size(res),
25048d318a50SLinus Walleij 			       D40_NAME " I/O base") == NULL)
25058d318a50SLinus Walleij 		goto failure;
25068d318a50SLinus Walleij 
25078d318a50SLinus Walleij 	virtbase = ioremap(res->start, resource_size(res));
25088d318a50SLinus Walleij 	if (!virtbase)
25098d318a50SLinus Walleij 		goto failure;
25108d318a50SLinus Walleij 
25118d318a50SLinus Walleij 	/* HW version check */
25128d318a50SLinus Walleij 	for (i = 0; i < ARRAY_SIZE(dma_id_regs); i++) {
25138d318a50SLinus Walleij 		if (dma_id_regs[i].val !=
25148d318a50SLinus Walleij 		    readl(virtbase + dma_id_regs[i].reg)) {
25158d318a50SLinus Walleij 			dev_err(&pdev->dev,
25168d318a50SLinus Walleij 				"[%s] Unknown hardware! Expected 0x%x at 0x%x but got 0x%x\n",
25178d318a50SLinus Walleij 				__func__,
25188d318a50SLinus Walleij 				dma_id_regs[i].val,
25198d318a50SLinus Walleij 				dma_id_regs[i].reg,
25208d318a50SLinus Walleij 				readl(virtbase + dma_id_regs[i].reg));
25218d318a50SLinus Walleij 			goto failure;
25228d318a50SLinus Walleij 		}
25238d318a50SLinus Walleij 	}
25248d318a50SLinus Walleij 
25253ae0267fSJonas Aaberg 	/* Get silicon revision and designer */
2526f4185592SLinus Walleij 	val = readl(virtbase + D40_DREG_PERIPHID2);
25278d318a50SLinus Walleij 
25283ae0267fSJonas Aaberg 	if ((val & D40_DREG_PERIPHID2_DESIGNER_MASK) !=
25293ae0267fSJonas Aaberg 	    D40_HW_DESIGNER) {
25308d318a50SLinus Walleij 		dev_err(&pdev->dev,
25318d318a50SLinus Walleij 			"[%s] Unknown designer! Got %x wanted %x\n",
25323ae0267fSJonas Aaberg 			__func__, val & D40_DREG_PERIPHID2_DESIGNER_MASK,
25333ae0267fSJonas Aaberg 			D40_HW_DESIGNER);
25348d318a50SLinus Walleij 		goto failure;
25358d318a50SLinus Walleij 	}
25368d318a50SLinus Walleij 
25373ae0267fSJonas Aaberg 	rev = (val & D40_DREG_PERIPHID2_REV_MASK) >>
25383ae0267fSJonas Aaberg 		D40_DREG_PERIPHID2_REV_POS;
25393ae0267fSJonas Aaberg 
25408d318a50SLinus Walleij 	/* The number of physical channels on this HW */
25418d318a50SLinus Walleij 	num_phy_chans = 4 * (readl(virtbase + D40_DREG_ICFG) & 0x7) + 4;
25428d318a50SLinus Walleij 
25438d318a50SLinus Walleij 	dev_info(&pdev->dev, "hardware revision: %d @ 0x%x\n",
25443ae0267fSJonas Aaberg 		 rev, res->start);
25458d318a50SLinus Walleij 
25468d318a50SLinus Walleij 	plat_data = pdev->dev.platform_data;
25478d318a50SLinus Walleij 
25488d318a50SLinus Walleij 	/* Count the number of logical channels in use */
25498d318a50SLinus Walleij 	for (i = 0; i < plat_data->dev_len; i++)
25508d318a50SLinus Walleij 		if (plat_data->dev_rx[i] != 0)
25518d318a50SLinus Walleij 			num_log_chans++;
25528d318a50SLinus Walleij 
25538d318a50SLinus Walleij 	for (i = 0; i < plat_data->dev_len; i++)
25548d318a50SLinus Walleij 		if (plat_data->dev_tx[i] != 0)
25558d318a50SLinus Walleij 			num_log_chans++;
25568d318a50SLinus Walleij 
25578d318a50SLinus Walleij 	base = kzalloc(ALIGN(sizeof(struct d40_base), 4) +
25588d318a50SLinus Walleij 		       (num_phy_chans + num_log_chans + plat_data->memcpy_len) *
25598d318a50SLinus Walleij 		       sizeof(struct d40_chan), GFP_KERNEL);
25608d318a50SLinus Walleij 
25618d318a50SLinus Walleij 	if (base == NULL) {
25628d318a50SLinus Walleij 		dev_err(&pdev->dev, "[%s] Out of memory\n", __func__);
25638d318a50SLinus Walleij 		goto failure;
25648d318a50SLinus Walleij 	}
25658d318a50SLinus Walleij 
25663ae0267fSJonas Aaberg 	base->rev = rev;
25678d318a50SLinus Walleij 	base->clk = clk;
25688d318a50SLinus Walleij 	base->num_phy_chans = num_phy_chans;
25698d318a50SLinus Walleij 	base->num_log_chans = num_log_chans;
25708d318a50SLinus Walleij 	base->phy_start = res->start;
25718d318a50SLinus Walleij 	base->phy_size = resource_size(res);
25728d318a50SLinus Walleij 	base->virtbase = virtbase;
25738d318a50SLinus Walleij 	base->plat_data = plat_data;
25748d318a50SLinus Walleij 	base->dev = &pdev->dev;
25758d318a50SLinus Walleij 	base->phy_chans = ((void *)base) + ALIGN(sizeof(struct d40_base), 4);
25768d318a50SLinus Walleij 	base->log_chans = &base->phy_chans[num_phy_chans];
25778d318a50SLinus Walleij 
25788d318a50SLinus Walleij 	base->phy_res = kzalloc(num_phy_chans * sizeof(struct d40_phy_res),
25798d318a50SLinus Walleij 				GFP_KERNEL);
25808d318a50SLinus Walleij 	if (!base->phy_res)
25818d318a50SLinus Walleij 		goto failure;
25828d318a50SLinus Walleij 
25838d318a50SLinus Walleij 	base->lookup_phy_chans = kzalloc(num_phy_chans *
25848d318a50SLinus Walleij 					 sizeof(struct d40_chan *),
25858d318a50SLinus Walleij 					 GFP_KERNEL);
25868d318a50SLinus Walleij 	if (!base->lookup_phy_chans)
25878d318a50SLinus Walleij 		goto failure;
25888d318a50SLinus Walleij 
25898d318a50SLinus Walleij 	if (num_log_chans + plat_data->memcpy_len) {
25908d318a50SLinus Walleij 		/*
25918d318a50SLinus Walleij 		 * The max number of logical channels are event lines for all
25928d318a50SLinus Walleij 		 * src devices and dst devices
25938d318a50SLinus Walleij 		 */
25948d318a50SLinus Walleij 		base->lookup_log_chans = kzalloc(plat_data->dev_len * 2 *
25958d318a50SLinus Walleij 						 sizeof(struct d40_chan *),
25968d318a50SLinus Walleij 						 GFP_KERNEL);
25978d318a50SLinus Walleij 		if (!base->lookup_log_chans)
25988d318a50SLinus Walleij 			goto failure;
25998d318a50SLinus Walleij 	}
2600698e4732SJonas Aaberg 
2601698e4732SJonas Aaberg 	base->lcla_pool.alloc_map = kzalloc(num_phy_chans *
2602698e4732SJonas Aaberg 					    sizeof(struct d40_desc *) *
2603698e4732SJonas Aaberg 					    D40_LCLA_LINK_PER_EVENT_GRP,
26048d318a50SLinus Walleij 					    GFP_KERNEL);
26058d318a50SLinus Walleij 	if (!base->lcla_pool.alloc_map)
26068d318a50SLinus Walleij 		goto failure;
26078d318a50SLinus Walleij 
2608c675b1b4SJonas Aaberg 	base->desc_slab = kmem_cache_create(D40_NAME, sizeof(struct d40_desc),
2609c675b1b4SJonas Aaberg 					    0, SLAB_HWCACHE_ALIGN,
2610c675b1b4SJonas Aaberg 					    NULL);
2611c675b1b4SJonas Aaberg 	if (base->desc_slab == NULL)
2612c675b1b4SJonas Aaberg 		goto failure;
2613c675b1b4SJonas Aaberg 
26148d318a50SLinus Walleij 	return base;
26158d318a50SLinus Walleij 
26168d318a50SLinus Walleij failure:
2617c6134c96SRabin Vincent 	if (!IS_ERR(clk)) {
26188d318a50SLinus Walleij 		clk_disable(clk);
26198d318a50SLinus Walleij 		clk_put(clk);
26208d318a50SLinus Walleij 	}
26218d318a50SLinus Walleij 	if (virtbase)
26228d318a50SLinus Walleij 		iounmap(virtbase);
26238d318a50SLinus Walleij 	if (res)
26248d318a50SLinus Walleij 		release_mem_region(res->start,
26258d318a50SLinus Walleij 				   resource_size(res));
26268d318a50SLinus Walleij 	if (virtbase)
26278d318a50SLinus Walleij 		iounmap(virtbase);
26288d318a50SLinus Walleij 
26298d318a50SLinus Walleij 	if (base) {
26308d318a50SLinus Walleij 		kfree(base->lcla_pool.alloc_map);
26318d318a50SLinus Walleij 		kfree(base->lookup_log_chans);
26328d318a50SLinus Walleij 		kfree(base->lookup_phy_chans);
26338d318a50SLinus Walleij 		kfree(base->phy_res);
26348d318a50SLinus Walleij 		kfree(base);
26358d318a50SLinus Walleij 	}
26368d318a50SLinus Walleij 
26378d318a50SLinus Walleij 	return NULL;
26388d318a50SLinus Walleij }
26398d318a50SLinus Walleij 
/*
 * d40_hw_init - program the DMA40 block to its default configuration.
 *
 * Writes the static init table (clocking, logical-channel interrupt
 * mask/clear registers), then builds and writes the per-physical-channel
 * mode, active-state and physical-interrupt registers.
 *
 * NOTE(review): the bit packing below is order-sensitive: each iteration
 * left-shifts the accumulators before OR-ing in the current channel's
 * bits, and channels are examined in reverse index order
 * (num_phy_chans - i - 1), so the lowest-numbered channel ends up in the
 * least-significant bit positions. Even/odd iterations alternate between
 * the [0]/[1] halves of the two-register pairs.
 */
static void __init d40_hw_init(struct d40_base *base)
{

	static const struct d40_reg_val dma_init_reg[] = {
		/* Clock every part of the DMA block from start */
		{ .reg = D40_DREG_GCC,    .val = 0x0000ff01},

		/* Interrupts on all logical channels */
		{ .reg = D40_DREG_LCMIS0, .val = 0xFFFFFFFF},
		{ .reg = D40_DREG_LCMIS1, .val = 0xFFFFFFFF},
		{ .reg = D40_DREG_LCMIS2, .val = 0xFFFFFFFF},
		{ .reg = D40_DREG_LCMIS3, .val = 0xFFFFFFFF},
		{ .reg = D40_DREG_LCICR0, .val = 0xFFFFFFFF},
		{ .reg = D40_DREG_LCICR1, .val = 0xFFFFFFFF},
		{ .reg = D40_DREG_LCICR2, .val = 0xFFFFFFFF},
		{ .reg = D40_DREG_LCICR3, .val = 0xFFFFFFFF},
		{ .reg = D40_DREG_LCTIS0, .val = 0xFFFFFFFF},
		{ .reg = D40_DREG_LCTIS1, .val = 0xFFFFFFFF},
		{ .reg = D40_DREG_LCTIS2, .val = 0xFFFFFFFF},
		{ .reg = D40_DREG_LCTIS3, .val = 0xFFFFFFFF}
	};
	int i;
	u32 prmseo[2] = {0, 0};
	u32 activeo[2] = {0xFFFFFFFF, 0xFFFFFFFF};
	u32 pcmis = 0;
	u32 pcicr = 0;

	/* Apply the static register init table */
	for (i = 0; i < ARRAY_SIZE(dma_init_reg); i++)
		writel(dma_init_reg[i].val,
		       base->virtbase + dma_init_reg[i].reg);

	/* Configure all our dma channels to default settings */
	for (i = 0; i < base->num_phy_chans; i++) {

		/* Make room for this channel's 2-bit field */
		activeo[i % 2] = activeo[i % 2] << 2;

		/*
		 * Channels reserved at phy_res_init time (secure or
		 * disabled) are marked active and get no interrupt
		 * enable/clear bits (the continue skips those shifts).
		 */
		if (base->phy_res[base->num_phy_chans - i - 1].allocated_src
		    == D40_ALLOC_PHY) {
			activeo[i % 2] |= 3;
			continue;
		}

		/* Enable interrupt # */
		pcmis = (pcmis << 1) | 1;

		/* Clear interrupt # */
		pcicr = (pcicr << 1) | 1;

		/* Set channel to physical mode */
		prmseo[i % 2] = prmseo[i % 2] << 2;
		prmseo[i % 2] |= 1;

	}

	writel(prmseo[1], base->virtbase + D40_DREG_PRMSE);
	writel(prmseo[0], base->virtbase + D40_DREG_PRMSO);
	writel(activeo[1], base->virtbase + D40_DREG_ACTIVE);
	writel(activeo[0], base->virtbase + D40_DREG_ACTIVO);

	/* Write which interrupt to enable */
	writel(pcmis, base->virtbase + D40_DREG_PCMIS);

	/* Write which interrupt to clear */
	writel(pcicr, base->virtbase + D40_DREG_PCICR);

}
27068d318a50SLinus Walleij 
/*
 * d40_lcla_allocate - allocate the LCLA (logical channel link address)
 * area and program its physical address into the controller.
 *
 * The hardware requires the area to be aligned to LCLA_ALIGNMENT
 * (256 KiB). We first retry page allocations hoping to hit an aligned
 * one; if that fails we fall back to over-allocating a plain buffer and
 * aligning the pointer inside it.
 *
 * Returns 0 on success, -ENOMEM on allocation failure.
 */
static int __init d40_lcla_allocate(struct d40_base *base)
{
	unsigned long *page_list;
	int i, j;
	int ret = 0;

	/*
	 * This is somewhat ugly. We need 8192 bytes that are 18 bit aligned,
	 * to fulfill this hardware requirement without wasting 256 kb
	 * we allocate pages until we get an aligned one.
	 */
	page_list = kmalloc(sizeof(unsigned long) * MAX_LCLA_ALLOC_ATTEMPTS,
			    GFP_KERNEL);

	if (!page_list) {
		ret = -ENOMEM;
		goto failure;
	}

	/* Calculating how many pages that are required */
	base->lcla_pool.pages = SZ_1K * base->num_phy_chans / PAGE_SIZE;

	/* Keep allocating until an attempt lands on an aligned address */
	for (i = 0; i < MAX_LCLA_ALLOC_ATTEMPTS; i++) {
		page_list[i] = __get_free_pages(GFP_KERNEL,
						base->lcla_pool.pages);
		if (!page_list[i]) {

			dev_err(base->dev,
				"[%s] Failed to allocate %d pages.\n",
				__func__, base->lcla_pool.pages);

			/* Release every earlier (misaligned) attempt */
			for (j = 0; j < i; j++)
				free_pages(page_list[j], base->lcla_pool.pages);
			goto failure;
		}

		if ((virt_to_phys((void *)page_list[i]) &
		     (LCLA_ALIGNMENT - 1)) == 0)
			break;
	}

	/* Free all attempts before the winning one (entry i is kept) */
	for (j = 0; j < i; j++)
		free_pages(page_list[j], base->lcla_pool.pages);

	if (i < MAX_LCLA_ALLOC_ATTEMPTS) {
		base->lcla_pool.base = (void *)page_list[i];
	} else {
		/*
		 * After many attempts and no success with finding the correct
		 * alignment, try with allocating a big buffer.
		 */
		dev_warn(base->dev,
			 "[%s] Failed to get %d pages @ 18 bit align.\n",
			 __func__, base->lcla_pool.pages);
		base->lcla_pool.base_unaligned = kmalloc(SZ_1K *
							 base->num_phy_chans +
							 LCLA_ALIGNMENT,
							 GFP_KERNEL);
		if (!base->lcla_pool.base_unaligned) {
			ret = -ENOMEM;
			goto failure;
		}

		/* base_unaligned is kept around so it can be kfree()d later */
		base->lcla_pool.base = PTR_ALIGN(base->lcla_pool.base_unaligned,
						 LCLA_ALIGNMENT);
	}

	/* Tell the hardware where the LCLA area lives */
	writel(virt_to_phys(base->lcla_pool.base),
	       base->virtbase + D40_DREG_LCLA);
failure:
	kfree(page_list);
	return ret;
}
2780508849adSLinus Walleij 
27818d318a50SLinus Walleij static int __init d40_probe(struct platform_device *pdev)
27828d318a50SLinus Walleij {
27838d318a50SLinus Walleij 	int err;
27848d318a50SLinus Walleij 	int ret = -ENOENT;
27858d318a50SLinus Walleij 	struct d40_base *base;
27868d318a50SLinus Walleij 	struct resource *res = NULL;
27878d318a50SLinus Walleij 	int num_reserved_chans;
27888d318a50SLinus Walleij 	u32 val;
27898d318a50SLinus Walleij 
27908d318a50SLinus Walleij 	base = d40_hw_detect_init(pdev);
27918d318a50SLinus Walleij 
27928d318a50SLinus Walleij 	if (!base)
27938d318a50SLinus Walleij 		goto failure;
27948d318a50SLinus Walleij 
27958d318a50SLinus Walleij 	num_reserved_chans = d40_phy_res_init(base);
27968d318a50SLinus Walleij 
27978d318a50SLinus Walleij 	platform_set_drvdata(pdev, base);
27988d318a50SLinus Walleij 
27998d318a50SLinus Walleij 	spin_lock_init(&base->interrupt_lock);
28008d318a50SLinus Walleij 	spin_lock_init(&base->execmd_lock);
28018d318a50SLinus Walleij 
28028d318a50SLinus Walleij 	/* Get IO for logical channel parameter address */
28038d318a50SLinus Walleij 	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "lcpa");
28048d318a50SLinus Walleij 	if (!res) {
28058d318a50SLinus Walleij 		ret = -ENOENT;
28068d318a50SLinus Walleij 		dev_err(&pdev->dev,
28078d318a50SLinus Walleij 			"[%s] No \"lcpa\" memory resource\n",
28088d318a50SLinus Walleij 			__func__);
28098d318a50SLinus Walleij 		goto failure;
28108d318a50SLinus Walleij 	}
28118d318a50SLinus Walleij 	base->lcpa_size = resource_size(res);
28128d318a50SLinus Walleij 	base->phy_lcpa = res->start;
28138d318a50SLinus Walleij 
28148d318a50SLinus Walleij 	if (request_mem_region(res->start, resource_size(res),
28158d318a50SLinus Walleij 			       D40_NAME " I/O lcpa") == NULL) {
28168d318a50SLinus Walleij 		ret = -EBUSY;
28178d318a50SLinus Walleij 		dev_err(&pdev->dev,
28188d318a50SLinus Walleij 			"[%s] Failed to request LCPA region 0x%x-0x%x\n",
28198d318a50SLinus Walleij 			__func__, res->start, res->end);
28208d318a50SLinus Walleij 		goto failure;
28218d318a50SLinus Walleij 	}
28228d318a50SLinus Walleij 
28238d318a50SLinus Walleij 	/* We make use of ESRAM memory for this. */
28248d318a50SLinus Walleij 	val = readl(base->virtbase + D40_DREG_LCPA);
28258d318a50SLinus Walleij 	if (res->start != val && val != 0) {
28268d318a50SLinus Walleij 		dev_warn(&pdev->dev,
28278d318a50SLinus Walleij 			 "[%s] Mismatch LCPA dma 0x%x, def 0x%x\n",
28288d318a50SLinus Walleij 			 __func__, val, res->start);
28298d318a50SLinus Walleij 	} else
28308d318a50SLinus Walleij 		writel(res->start, base->virtbase + D40_DREG_LCPA);
28318d318a50SLinus Walleij 
28328d318a50SLinus Walleij 	base->lcpa_base = ioremap(res->start, resource_size(res));
28338d318a50SLinus Walleij 	if (!base->lcpa_base) {
28348d318a50SLinus Walleij 		ret = -ENOMEM;
28358d318a50SLinus Walleij 		dev_err(&pdev->dev,
28368d318a50SLinus Walleij 			"[%s] Failed to ioremap LCPA region\n",
28378d318a50SLinus Walleij 			__func__);
28388d318a50SLinus Walleij 		goto failure;
28398d318a50SLinus Walleij 	}
2840508849adSLinus Walleij 
2841508849adSLinus Walleij 	ret = d40_lcla_allocate(base);
2842508849adSLinus Walleij 	if (ret) {
2843508849adSLinus Walleij 		dev_err(&pdev->dev, "[%s] Failed to allocate LCLA area\n",
28448d318a50SLinus Walleij 			__func__);
28458d318a50SLinus Walleij 		goto failure;
28468d318a50SLinus Walleij 	}
28478d318a50SLinus Walleij 
28488d318a50SLinus Walleij 	spin_lock_init(&base->lcla_pool.lock);
28498d318a50SLinus Walleij 
28508d318a50SLinus Walleij 	base->irq = platform_get_irq(pdev, 0);
28518d318a50SLinus Walleij 
28528d318a50SLinus Walleij 	ret = request_irq(base->irq, d40_handle_interrupt, 0, D40_NAME, base);
28538d318a50SLinus Walleij 
28548d318a50SLinus Walleij 	if (ret) {
28558d318a50SLinus Walleij 		dev_err(&pdev->dev, "[%s] No IRQ defined\n", __func__);
28568d318a50SLinus Walleij 		goto failure;
28578d318a50SLinus Walleij 	}
28588d318a50SLinus Walleij 
28598d318a50SLinus Walleij 	err = d40_dmaengine_init(base, num_reserved_chans);
28608d318a50SLinus Walleij 	if (err)
28618d318a50SLinus Walleij 		goto failure;
28628d318a50SLinus Walleij 
28638d318a50SLinus Walleij 	d40_hw_init(base);
28648d318a50SLinus Walleij 
28658d318a50SLinus Walleij 	dev_info(base->dev, "initialized\n");
28668d318a50SLinus Walleij 	return 0;
28678d318a50SLinus Walleij 
28688d318a50SLinus Walleij failure:
28698d318a50SLinus Walleij 	if (base) {
2870c675b1b4SJonas Aaberg 		if (base->desc_slab)
2871c675b1b4SJonas Aaberg 			kmem_cache_destroy(base->desc_slab);
28728d318a50SLinus Walleij 		if (base->virtbase)
28738d318a50SLinus Walleij 			iounmap(base->virtbase);
2874508849adSLinus Walleij 		if (!base->lcla_pool.base_unaligned && base->lcla_pool.base)
2875508849adSLinus Walleij 			free_pages((unsigned long)base->lcla_pool.base,
2876508849adSLinus Walleij 				   base->lcla_pool.pages);
2877767a9675SJonas Aaberg 
2878508849adSLinus Walleij 		kfree(base->lcla_pool.base_unaligned);
2879767a9675SJonas Aaberg 
28808d318a50SLinus Walleij 		if (base->phy_lcpa)
28818d318a50SLinus Walleij 			release_mem_region(base->phy_lcpa,
28828d318a50SLinus Walleij 					   base->lcpa_size);
28838d318a50SLinus Walleij 		if (base->phy_start)
28848d318a50SLinus Walleij 			release_mem_region(base->phy_start,
28858d318a50SLinus Walleij 					   base->phy_size);
28868d318a50SLinus Walleij 		if (base->clk) {
28878d318a50SLinus Walleij 			clk_disable(base->clk);
28888d318a50SLinus Walleij 			clk_put(base->clk);
28898d318a50SLinus Walleij 		}
28908d318a50SLinus Walleij 
28918d318a50SLinus Walleij 		kfree(base->lcla_pool.alloc_map);
28928d318a50SLinus Walleij 		kfree(base->lookup_log_chans);
28938d318a50SLinus Walleij 		kfree(base->lookup_phy_chans);
28948d318a50SLinus Walleij 		kfree(base->phy_res);
28958d318a50SLinus Walleij 		kfree(base);
28968d318a50SLinus Walleij 	}
28978d318a50SLinus Walleij 
28988d318a50SLinus Walleij 	dev_err(&pdev->dev, "[%s] probe failed\n", __func__);
28998d318a50SLinus Walleij 	return ret;
29008d318a50SLinus Walleij }
29018d318a50SLinus Walleij 
/*
 * Platform driver descriptor. No .probe member here: the probe callback
 * is passed to platform_driver_probe() in stedma40_init() below, which
 * supports __init probe functions that are discarded after boot.
 */
static struct platform_driver d40_driver = {
	.driver = {
		.owner = THIS_MODULE,
		.name  = D40_NAME,
	},
};
29088d318a50SLinus Walleij 
/*
 * stedma40_init - register the DMA40 platform driver.
 *
 * Uses platform_driver_probe() so d40_probe() (marked __init) can be
 * freed after boot; called at arch_initcall time so DMA is available
 * early for other drivers.
 */
int __init stedma40_init(void)
{
	return platform_driver_probe(&d40_driver, d40_probe);
}
arch_initcall(stedma40_init);
2914