xref: /linux/drivers/dma/ste_dma40.c (revision c6134c967c5b8b5986371de335fa4ec39de268bc)
18d318a50SLinus Walleij /*
2767a9675SJonas Aaberg  * Copyright (C) ST-Ericsson SA 2007-2010
3767a9675SJonas Aaberg  * Author: Per Friden <per.friden@stericsson.com> for ST-Ericsson
4767a9675SJonas Aaberg  * Author: Jonas Aaberg <jonas.aberg@stericsson.com> for ST-Ericsson
58d318a50SLinus Walleij  * License terms: GNU General Public License (GPL) version 2
68d318a50SLinus Walleij  */
78d318a50SLinus Walleij 
88d318a50SLinus Walleij #include <linux/kernel.h>
98d318a50SLinus Walleij #include <linux/slab.h>
108d318a50SLinus Walleij #include <linux/dmaengine.h>
118d318a50SLinus Walleij #include <linux/platform_device.h>
128d318a50SLinus Walleij #include <linux/clk.h>
138d318a50SLinus Walleij #include <linux/delay.h>
14698e4732SJonas Aaberg #include <linux/err.h>
158d318a50SLinus Walleij 
168d318a50SLinus Walleij #include <plat/ste_dma40.h>
178d318a50SLinus Walleij 
188d318a50SLinus Walleij #include "ste_dma40_ll.h"
198d318a50SLinus Walleij 
208d318a50SLinus Walleij #define D40_NAME "dma40"
218d318a50SLinus Walleij 
228d318a50SLinus Walleij #define D40_PHY_CHAN -1
238d318a50SLinus Walleij 
248d318a50SLinus Walleij /* For masking out/in 2 bit channel positions */
258d318a50SLinus Walleij #define D40_CHAN_POS(chan)  (2 * (chan / 2))
268d318a50SLinus Walleij #define D40_CHAN_POS_MASK(chan) (0x3 << D40_CHAN_POS(chan))
278d318a50SLinus Walleij 
288d318a50SLinus Walleij /* Maximum iterations taken before giving up suspending a channel */
298d318a50SLinus Walleij #define D40_SUSPEND_MAX_IT 500
308d318a50SLinus Walleij 
31508849adSLinus Walleij /* Hardware requirement on LCLA alignment */
32508849adSLinus Walleij #define LCLA_ALIGNMENT 0x40000
33698e4732SJonas Aaberg 
34698e4732SJonas Aaberg /* Max number of links per event group */
35698e4732SJonas Aaberg #define D40_LCLA_LINK_PER_EVENT_GRP 128
36698e4732SJonas Aaberg #define D40_LCLA_END D40_LCLA_LINK_PER_EVENT_GRP
37698e4732SJonas Aaberg 
38508849adSLinus Walleij /* Attempts before giving up to trying to get pages that are aligned */
39508849adSLinus Walleij #define MAX_LCLA_ALLOC_ATTEMPTS 256
40508849adSLinus Walleij 
41508849adSLinus Walleij /* Bit markings for allocation map */
428d318a50SLinus Walleij #define D40_ALLOC_FREE		(1 << 31)
438d318a50SLinus Walleij #define D40_ALLOC_PHY		(1 << 30)
448d318a50SLinus Walleij #define D40_ALLOC_LOG_FREE	0
458d318a50SLinus Walleij 
468d318a50SLinus Walleij /* Hardware designer of the block */
473ae0267fSJonas Aaberg #define D40_HW_DESIGNER 0x8
488d318a50SLinus Walleij 
498d318a50SLinus Walleij /**
508d318a50SLinus Walleij  * enum 40_command - The different commands and/or statuses.
518d318a50SLinus Walleij  *
528d318a50SLinus Walleij  * @D40_DMA_STOP: DMA channel command STOP or status STOPPED,
538d318a50SLinus Walleij  * @D40_DMA_RUN: The DMA channel is RUNNING or the command is RUN.
548d318a50SLinus Walleij  * @D40_DMA_SUSPEND_REQ: Request the DMA to SUSPEND as soon as possible.
558d318a50SLinus Walleij  * @D40_DMA_SUSPENDED: The DMA channel is SUSPENDED.
568d318a50SLinus Walleij  */
578d318a50SLinus Walleij enum d40_command {
	/* Values map directly onto the 2-bit per-channel command/status
	 * field in the ACTIVE/ACTIVO registers (see D40_CHAN_POS*). */
588d318a50SLinus Walleij 	D40_DMA_STOP		= 0,
598d318a50SLinus Walleij 	D40_DMA_RUN		= 1,
608d318a50SLinus Walleij 	D40_DMA_SUSPEND_REQ	= 2,
618d318a50SLinus Walleij 	D40_DMA_SUSPENDED	= 3
628d318a50SLinus Walleij };
638d318a50SLinus Walleij 
648d318a50SLinus Walleij /**
658d318a50SLinus Walleij  * struct d40_lli_pool - Structure for keeping LLIs in memory
668d318a50SLinus Walleij  *
678d318a50SLinus Walleij  * @base: Pointer to memory area when the pre_alloc_lli's are not large
688d318a50SLinus Walleij  * enough, IE bigger than the most common case, 1 dst and 1 src. NULL if
698d318a50SLinus Walleij  * pre_alloc_lli is used.
708d318a50SLinus Walleij  * @size: The size in bytes of the memory at base or the size of pre_alloc_lli.
718d318a50SLinus Walleij  * @pre_alloc_lli: Pre allocated area for the most common case of transfers,
728d318a50SLinus Walleij  * one buffer to one buffer.
738d318a50SLinus Walleij  */
748d318a50SLinus Walleij struct d40_lli_pool {
	/* NULL while pre_alloc_lli is in use (see d40_pool_lli_alloc()) */
758d318a50SLinus Walleij 	void	*base;
768d318a50SLinus Walleij 	int	 size;
778d318a50SLinus Walleij 	/* Space for dst and src, plus an extra for padding */
788d318a50SLinus Walleij 	u8	 pre_alloc_lli[3 * sizeof(struct d40_phy_lli)];
798d318a50SLinus Walleij };
808d318a50SLinus Walleij 
818d318a50SLinus Walleij /**
828d318a50SLinus Walleij  * struct d40_desc - A descriptor is one DMA job.
838d318a50SLinus Walleij  *
848d318a50SLinus Walleij  * @lli_phy: LLI settings for physical channel. Both src and dst=
858d318a50SLinus Walleij  * points into the lli_pool, to base if lli_len > 1 or to pre_alloc_lli if
868d318a50SLinus Walleij  * lli_len equals one.
878d318a50SLinus Walleij  * @lli_log: Same as above but for logical channels.
888d318a50SLinus Walleij  * @lli_pool: The pool with two entries pre-allocated.
89941b77a3SPer Friden  * @lli_len: Number of llis of current descriptor.
90698e4732SJonas Aaberg  * @lli_current: Number of transferred llis.
91698e4732SJonas Aaberg  * @lcla_alloc: Number of LCLA entries allocated.
928d318a50SLinus Walleij  * @txd: DMA engine struct. Used for among other things for communication
938d318a50SLinus Walleij  * during a transfer.
948d318a50SLinus Walleij  * @node: List entry.
958d318a50SLinus Walleij  * @is_in_client_list: true if the client owns this descriptor.
96aa182ae2SJonas Aaberg  * @is_hw_linked: true if this job will automatically be continued for
97aa182ae2SJonas Aaberg  * the previous one.
988d318a50SLinus Walleij  *
998d318a50SLinus Walleij  * This descriptor is used for both logical and physical transfers.
1008d318a50SLinus Walleij  */
1018d318a50SLinus Walleij struct d40_desc {
1028d318a50SLinus Walleij 	/* LLI physical */
1038d318a50SLinus Walleij 	struct d40_phy_lli_bidir	 lli_phy;
1048d318a50SLinus Walleij 	/* LLI logical */
1058d318a50SLinus Walleij 	struct d40_log_lli_bidir	 lli_log;
1068d318a50SLinus Walleij 
1078d318a50SLinus Walleij 	struct d40_lli_pool		 lli_pool;
1088d318a50SLinus Walleij 	int				 lli_len;
109698e4732SJonas Aaberg 	int				 lli_current;
	/* Count of LCLA slots held; released via d40_lcla_free_all() */
110698e4732SJonas Aaberg 	int				 lcla_alloc;
1118d318a50SLinus Walleij 
1128d318a50SLinus Walleij 	struct dma_async_tx_descriptor	 txd;
1138d318a50SLinus Walleij 	struct list_head		 node;
1148d318a50SLinus Walleij 
1158d318a50SLinus Walleij 	bool				 is_in_client_list;
116aa182ae2SJonas Aaberg 	bool				 is_hw_linked;
1178d318a50SLinus Walleij };
1188d318a50SLinus Walleij 
1198d318a50SLinus Walleij /**
1208d318a50SLinus Walleij  * struct d40_lcla_pool - LCLA pool settings and data.
1218d318a50SLinus Walleij  *
122508849adSLinus Walleij  * @base: The virtual address of LCLA. 18 bit aligned.
123508849adSLinus Walleij  * @base_unaligned: The original kmalloc pointer, if kmalloc is used.
124508849adSLinus Walleij  * This pointer is only there for clean-up on error.
125508849adSLinus Walleij  * @pages: The number of pages needed for all physical channels.
126508849adSLinus Walleij  * Only used later for clean-up on error
1278d318a50SLinus Walleij  * @lock: Lock to protect the content in this struct.
128698e4732SJonas Aaberg  * @alloc_map: big map over which LCLA entry is owned by which job.
1298d318a50SLinus Walleij  */
1308d318a50SLinus Walleij struct d40_lcla_pool {
1318d318a50SLinus Walleij 	void		*base;
132508849adSLinus Walleij 	void		*base_unaligned;
133508849adSLinus Walleij 	int		 pages;
1348d318a50SLinus Walleij 	spinlock_t	 lock;
	/* Indexed by phy chan num * D40_LCLA_LINK_PER_EVENT_GRP + link slot
	 * (see d40_lcla_alloc_one()); entry is the owning descriptor. */
135698e4732SJonas Aaberg 	struct d40_desc	**alloc_map;
1368d318a50SLinus Walleij };
1378d318a50SLinus Walleij 
1388d318a50SLinus Walleij /**
1398d318a50SLinus Walleij  * struct d40_phy_res - struct for handling eventlines mapped to physical
1408d318a50SLinus Walleij  * channels.
1418d318a50SLinus Walleij  *
1428d318a50SLinus Walleij  * @lock: A lock protecting this entity.
1438d318a50SLinus Walleij  * @num: The physical channel number of this entity.
1448d318a50SLinus Walleij  * @allocated_src: Bit mapped to show which src event line's are mapped to
1458d318a50SLinus Walleij  * this physical channel. Can also be free or physically allocated.
1468d318a50SLinus Walleij  * @allocated_dst: Same as for src but is dst.
1478d318a50SLinus Walleij  * allocated_dst and allocated_src uses the D40_ALLOC* defines as well as
148767a9675SJonas Aaberg  * event line number.
1498d318a50SLinus Walleij  */
1508d318a50SLinus Walleij struct d40_phy_res {
1518d318a50SLinus Walleij 	spinlock_t lock;
1528d318a50SLinus Walleij 	int	   num;
	/* Both fields use the D40_ALLOC_* markers (see defines above) */
1538d318a50SLinus Walleij 	u32	   allocated_src;
1548d318a50SLinus Walleij 	u32	   allocated_dst;
1558d318a50SLinus Walleij };
1568d318a50SLinus Walleij 
1578d318a50SLinus Walleij struct d40_base;
1588d318a50SLinus Walleij 
1598d318a50SLinus Walleij /**
1608d318a50SLinus Walleij  * struct d40_chan - Struct that describes a channel.
1618d318a50SLinus Walleij  *
1628d318a50SLinus Walleij  * @lock: A spinlock to protect this struct.
1638d318a50SLinus Walleij  * @log_num: The logical number, if any of this channel.
1648d318a50SLinus Walleij  * @completed: Starts with 1, after first interrupt it is set to dma engine's
1658d318a50SLinus Walleij  * current cookie.
1668d318a50SLinus Walleij  * @pending_tx: The number of pending transfers. Used between interrupt handler
1678d318a50SLinus Walleij  * and tasklet.
1688d318a50SLinus Walleij  * @busy: Set to true when transfer is ongoing on this channel.
1692a614340SJonas Aaberg  * @phy_chan: Pointer to physical channel which this instance runs on. If this
1702a614340SJonas Aaberg  * point is NULL, then the channel is not allocated.
1718d318a50SLinus Walleij  * @chan: DMA engine handle.
1728d318a50SLinus Walleij  * @tasklet: Tasklet that gets scheduled from interrupt context to complete a
1738d318a50SLinus Walleij  * transfer and call client callback.
1748d318a50SLinus Walleij  * @client: Client-owned descriptor list.
1758d318a50SLinus Walleij  * @active: Active descriptor.
1768d318a50SLinus Walleij  * @queue: Queued jobs.
1778d318a50SLinus Walleij  * @dma_cfg: The client configuration of this dma channel.
1788d318a50SLinus Walleij  * @base: Pointer to the device instance struct.
1798d318a50SLinus Walleij  * @src_def_cfg: Default cfg register setting for src.
1808d318a50SLinus Walleij  * @dst_def_cfg: Default cfg register setting for dst.
1818d318a50SLinus Walleij  * @log_def: Default logical channel settings.
1828d318a50SLinus Walleij  * @lcla: Space for one dst src pair for logical channel transfers.
1838d318a50SLinus Walleij  * @lcpa: Pointer to dst and src lcpa settings.
1848d318a50SLinus Walleij  *
1858d318a50SLinus Walleij  * This struct can either "be" a logical or a physical channel.
1868d318a50SLinus Walleij  */
1878d318a50SLinus Walleij struct d40_chan {
1888d318a50SLinus Walleij 	spinlock_t			 lock;
	/* D40_PHY_CHAN (-1) when this instance is a physical channel */
1898d318a50SLinus Walleij 	int				 log_num;
1908d318a50SLinus Walleij 	/* ID of the most recent completed transfer */
1918d318a50SLinus Walleij 	int				 completed;
1928d318a50SLinus Walleij 	int				 pending_tx;
1938d318a50SLinus Walleij 	bool				 busy;
1948d318a50SLinus Walleij 	struct d40_phy_res		*phy_chan;
1958d318a50SLinus Walleij 	struct dma_chan			 chan;
1968d318a50SLinus Walleij 	struct tasklet_struct		 tasklet;
1978d318a50SLinus Walleij 	struct list_head		 client;
1988d318a50SLinus Walleij 	struct list_head		 active;
1998d318a50SLinus Walleij 	struct list_head		 queue;
2008d318a50SLinus Walleij 	struct stedma40_chan_cfg	 dma_cfg;
2018d318a50SLinus Walleij 	struct d40_base			*base;
2028d318a50SLinus Walleij 	/* Default register configurations */
2038d318a50SLinus Walleij 	u32				 src_def_cfg;
2048d318a50SLinus Walleij 	u32				 dst_def_cfg;
2058d318a50SLinus Walleij 	struct d40_def_lcsp		 log_def;
2068d318a50SLinus Walleij 	struct d40_log_lli_full		*lcpa;
20795e1400fSLinus Walleij 	/* Runtime reconfiguration */
20895e1400fSLinus Walleij 	dma_addr_t			runtime_addr;
20995e1400fSLinus Walleij 	enum dma_data_direction		runtime_direction;
2108d318a50SLinus Walleij };
2118d318a50SLinus Walleij 
2128d318a50SLinus Walleij /**
2138d318a50SLinus Walleij  * struct d40_base - The big global struct, one for each probe'd instance.
2148d318a50SLinus Walleij  *
2158d318a50SLinus Walleij  * @interrupt_lock: Lock used to make sure one interrupt is handled at a time.
2168d318a50SLinus Walleij  * @execmd_lock: Lock for execute command usage since several channels share
2178d318a50SLinus Walleij  * the same physical register.
2188d318a50SLinus Walleij  * @dev: The device structure.
2198d318a50SLinus Walleij  * @virtbase: The virtual base address of the DMA's register.
220f4185592SLinus Walleij  * @rev: silicon revision detected.
2218d318a50SLinus Walleij  * @clk: Pointer to the DMA clock structure.
2228d318a50SLinus Walleij  * @phy_start: Physical memory start of the DMA registers.
2238d318a50SLinus Walleij  * @phy_size: Size of the DMA register map.
2248d318a50SLinus Walleij  * @irq: The IRQ number.
2258d318a50SLinus Walleij  * @num_phy_chans: The number of physical channels. Read from HW. This
2268d318a50SLinus Walleij  * is the number of available channels for this driver, not counting "Secure
2278d318a50SLinus Walleij  * mode" allocated physical channels.
2288d318a50SLinus Walleij  * @num_log_chans: The number of logical channels. Calculated from
2298d318a50SLinus Walleij  * num_phy_chans.
2308d318a50SLinus Walleij  * @dma_both: dma_device channels that can do both memcpy and slave transfers.
2318d318a50SLinus Walleij  * @dma_slave: dma_device channels that can only do slave transfers.
2328d318a50SLinus Walleij  * @dma_memcpy: dma_device channels that can only do memcpy transfers.
2338d318a50SLinus Walleij  * @log_chans: Room for all possible logical channels in system.
2348d318a50SLinus Walleij  * @lookup_log_chans: Used to map interrupt number to logical channel. Points
2358d318a50SLinus Walleij  * to log_chans entries.
2368d318a50SLinus Walleij  * @lookup_phy_chans: Used to map interrupt number to physical channel. Points
2378d318a50SLinus Walleij  * to phy_chans entries.
2388d318a50SLinus Walleij  * @plat_data: Pointer to provided platform_data which is the driver
2398d318a50SLinus Walleij  * configuration.
2408d318a50SLinus Walleij  * @phy_res: Vector containing all physical channels.
2418d318a50SLinus Walleij  * @lcla_pool: lcla pool settings and data.
2428d318a50SLinus Walleij  * @lcpa_base: The virtual mapped address of LCPA.
2438d318a50SLinus Walleij  * @phy_lcpa: The physical address of the LCPA.
2448d318a50SLinus Walleij  * @lcpa_size: The size of the LCPA area.
245c675b1b4SJonas Aaberg  * @desc_slab: cache for descriptors.
2468d318a50SLinus Walleij  */
2478d318a50SLinus Walleij struct d40_base {
2488d318a50SLinus Walleij 	spinlock_t			 interrupt_lock;
2498d318a50SLinus Walleij 	spinlock_t			 execmd_lock;
2508d318a50SLinus Walleij 	struct device			 *dev;
2518d318a50SLinus Walleij 	void __iomem			 *virtbase;
	/* Silicon revision, 4-bit field */
252f4185592SLinus Walleij 	u8				  rev:4;
2538d318a50SLinus Walleij 	struct clk			 *clk;
2548d318a50SLinus Walleij 	phys_addr_t			  phy_start;
2558d318a50SLinus Walleij 	resource_size_t			  phy_size;
2568d318a50SLinus Walleij 	int				  irq;
2578d318a50SLinus Walleij 	int				  num_phy_chans;
2588d318a50SLinus Walleij 	int				  num_log_chans;
2598d318a50SLinus Walleij 	struct dma_device		  dma_both;
2608d318a50SLinus Walleij 	struct dma_device		  dma_slave;
2618d318a50SLinus Walleij 	struct dma_device		  dma_memcpy;
2628d318a50SLinus Walleij 	struct d40_chan			 *phy_chans;
2638d318a50SLinus Walleij 	struct d40_chan			 *log_chans;
2648d318a50SLinus Walleij 	struct d40_chan			**lookup_log_chans;
2658d318a50SLinus Walleij 	struct d40_chan			**lookup_phy_chans;
2668d318a50SLinus Walleij 	struct stedma40_platform_data	 *plat_data;
2678d318a50SLinus Walleij 	/* Physical half channels */
2688d318a50SLinus Walleij 	struct d40_phy_res		 *phy_res;
2698d318a50SLinus Walleij 	struct d40_lcla_pool		  lcla_pool;
2708d318a50SLinus Walleij 	void				 *lcpa_base;
2718d318a50SLinus Walleij 	dma_addr_t			  phy_lcpa;
2728d318a50SLinus Walleij 	resource_size_t			  lcpa_size;
273c675b1b4SJonas Aaberg 	struct kmem_cache		 *desc_slab;
2748d318a50SLinus Walleij };
2758d318a50SLinus Walleij 
2768d318a50SLinus Walleij /**
2778d318a50SLinus Walleij  * struct d40_interrupt_lookup - lookup table for interrupt handler
2788d318a50SLinus Walleij  *
2798d318a50SLinus Walleij  * @src: Interrupt mask register.
2808d318a50SLinus Walleij  * @clr: Interrupt clear register.
2818d318a50SLinus Walleij  * @is_error: true if this is an error interrupt.
2828d318a50SLinus Walleij  * @offset: start delta in the lookup_log_chans in d40_base. If equals to
2838d318a50SLinus Walleij  * D40_PHY_CHAN, the lookup_phy_chans shall be used instead.
2848d318a50SLinus Walleij  */
2858d318a50SLinus Walleij struct d40_interrupt_lookup {
2868d318a50SLinus Walleij 	u32 src;
2878d318a50SLinus Walleij 	u32 clr;
2888d318a50SLinus Walleij 	bool is_error;
	/* D40_PHY_CHAN means: look up in lookup_phy_chans instead */
2898d318a50SLinus Walleij 	int offset;
2908d318a50SLinus Walleij };
2918d318a50SLinus Walleij 
2928d318a50SLinus Walleij /**
2938d318a50SLinus Walleij  * struct d40_reg_val - simple lookup struct
2948d318a50SLinus Walleij  *
2958d318a50SLinus Walleij  * @reg: The register.
2968d318a50SLinus Walleij  * @val: The value that belongs to the register in reg.
2978d318a50SLinus Walleij  */
	/* One register/value pair for table-driven register writes */
2988d318a50SLinus Walleij struct d40_reg_val {
2998d318a50SLinus Walleij 	unsigned int reg;
3008d318a50SLinus Walleij 	unsigned int val;
3018d318a50SLinus Walleij };
3028d318a50SLinus Walleij 
/*
 * Set up the LLI memory pool of a descriptor: room for lli_len src
 * entries plus lli_len dst entries, each align-sized.  The common
 * single-transfer case reuses the descriptor's pre_alloc_lli area and
 * allocates nothing; otherwise a buffer is kmalloc'd with one extra
 * align's worth of slack so src/dst can be re-aligned below.
 * Returns 0 on success or -ENOMEM.
 */
3038d318a50SLinus Walleij static int d40_pool_lli_alloc(struct d40_desc *d40d,
3048d318a50SLinus Walleij 			      int lli_len, bool is_log)
3058d318a50SLinus Walleij {
3068d318a50SLinus Walleij 	u32 align;
3078d318a50SLinus Walleij 	void *base;
3088d318a50SLinus Walleij 
3098d318a50SLinus Walleij 	if (is_log)
3108d318a50SLinus Walleij 		align = sizeof(struct d40_log_lli);
3118d318a50SLinus Walleij 	else
3128d318a50SLinus Walleij 		align = sizeof(struct d40_phy_lli);
3138d318a50SLinus Walleij 
3148d318a50SLinus Walleij 	if (lli_len == 1) {
3158d318a50SLinus Walleij 		base = d40d->lli_pool.pre_alloc_lli;
3168d318a50SLinus Walleij 		d40d->lli_pool.size = sizeof(d40d->lli_pool.pre_alloc_lli);
3178d318a50SLinus Walleij 		d40d->lli_pool.base = NULL;
3188d318a50SLinus Walleij 	} else {
3198d318a50SLinus Walleij 		d40d->lli_pool.size = ALIGN(lli_len * 2 * align, align);
3208d318a50SLinus Walleij 
		/* +align: slack so the PTR_ALIGNs below stay in-bounds */
3218d318a50SLinus Walleij 		base = kmalloc(d40d->lli_pool.size + align, GFP_NOWAIT);
3228d318a50SLinus Walleij 		d40d->lli_pool.base = base;
3238d318a50SLinus Walleij 
3248d318a50SLinus Walleij 		if (d40d->lli_pool.base == NULL)
3258d318a50SLinus Walleij 			return -ENOMEM;
3268d318a50SLinus Walleij 	}
3278d318a50SLinus Walleij 
3288d318a50SLinus Walleij 	if (is_log) {
3298d318a50SLinus Walleij 		d40d->lli_log.src = PTR_ALIGN((struct d40_log_lli *) base,
3308d318a50SLinus Walleij 					      align);
3318d318a50SLinus Walleij 		d40d->lli_log.dst = PTR_ALIGN(d40d->lli_log.src + lli_len,
3328d318a50SLinus Walleij 					      align);
3338d318a50SLinus Walleij 	} else {
3348d318a50SLinus Walleij 		d40d->lli_phy.src = PTR_ALIGN((struct d40_phy_lli *)base,
3358d318a50SLinus Walleij 					      align);
3368d318a50SLinus Walleij 		d40d->lli_phy.dst = PTR_ALIGN(d40d->lli_phy.src + lli_len,
3378d318a50SLinus Walleij 					      align);
3388d318a50SLinus Walleij 	}
3398d318a50SLinus Walleij 
3408d318a50SLinus Walleij 	return 0;
3418d318a50SLinus Walleij }
3428d318a50SLinus Walleij 
/*
 * Release the LLI pool memory of a descriptor and clear all LLI
 * pointers (kfree(NULL) is a no-op for the pre_alloc_lli case).
 */
3438d318a50SLinus Walleij static void d40_pool_lli_free(struct d40_desc *d40d)
3448d318a50SLinus Walleij {
3458d318a50SLinus Walleij 	kfree(d40d->lli_pool.base);
3468d318a50SLinus Walleij 	d40d->lli_pool.base = NULL;
3478d318a50SLinus Walleij 	d40d->lli_pool.size = 0;
3488d318a50SLinus Walleij 	d40d->lli_log.src = NULL;
3498d318a50SLinus Walleij 	d40d->lli_log.dst = NULL;
3508d318a50SLinus Walleij 	d40d->lli_phy.src = NULL;
3518d318a50SLinus Walleij 	d40d->lli_phy.dst = NULL;
3528d318a50SLinus Walleij }
3538d318a50SLinus Walleij 
/*
 * Allocate one free LCLA link slot (covering one src/dst pair) in the
 * channel's event group for descriptor d40d.  Returns the slot index
 * (>= 1; slot 0 is reserved as the end-of-link marker) or -EINVAL
 * when the group is exhausted.
 */
354698e4732SJonas Aaberg static int d40_lcla_alloc_one(struct d40_chan *d40c,
355698e4732SJonas Aaberg 			      struct d40_desc *d40d)
356698e4732SJonas Aaberg {
357698e4732SJonas Aaberg 	unsigned long flags;
358698e4732SJonas Aaberg 	int i;
359698e4732SJonas Aaberg 	int ret = -EINVAL;
360698e4732SJonas Aaberg 	int p;
361698e4732SJonas Aaberg 
362698e4732SJonas Aaberg 	spin_lock_irqsave(&d40c->base->lcla_pool.lock, flags);
363698e4732SJonas Aaberg 
	/* Base index of this physical channel's slice of alloc_map */
364698e4732SJonas Aaberg 	p = d40c->phy_chan->num * D40_LCLA_LINK_PER_EVENT_GRP;
365698e4732SJonas Aaberg 
366698e4732SJonas Aaberg 	/*
367698e4732SJonas Aaberg 	 * Allocate both src and dst at the same time, therefore the half
368698e4732SJonas Aaberg 	 * start on 1 since 0 can't be used since zero is used as end marker.
369698e4732SJonas Aaberg 	 */
370698e4732SJonas Aaberg 	for (i = 1 ; i < D40_LCLA_LINK_PER_EVENT_GRP / 2; i++) {
371698e4732SJonas Aaberg 		if (!d40c->base->lcla_pool.alloc_map[p + i]) {
372698e4732SJonas Aaberg 			d40c->base->lcla_pool.alloc_map[p + i] = d40d;
373698e4732SJonas Aaberg 			d40d->lcla_alloc++;
374698e4732SJonas Aaberg 			ret = i;
375698e4732SJonas Aaberg 			break;
376698e4732SJonas Aaberg 		}
377698e4732SJonas Aaberg 	}
378698e4732SJonas Aaberg 
379698e4732SJonas Aaberg 	spin_unlock_irqrestore(&d40c->base->lcla_pool.lock, flags);
380698e4732SJonas Aaberg 
381698e4732SJonas Aaberg 	return ret;
382698e4732SJonas Aaberg }
383698e4732SJonas Aaberg 
/*
 * Give back every LCLA slot owned by descriptor d40d on this channel.
 * Physical channels use no LCLA and return 0 immediately.  Returns 0
 * once the descriptor's lcla_alloc count drops to zero, -EINVAL if it
 * never does (i.e. the descriptor held no slots in this group).
 */
384698e4732SJonas Aaberg static int d40_lcla_free_all(struct d40_chan *d40c,
385698e4732SJonas Aaberg 			     struct d40_desc *d40d)
386698e4732SJonas Aaberg {
387698e4732SJonas Aaberg 	unsigned long flags;
388698e4732SJonas Aaberg 	int i;
389698e4732SJonas Aaberg 	int ret = -EINVAL;
390698e4732SJonas Aaberg 
391698e4732SJonas Aaberg 	if (d40c->log_num == D40_PHY_CHAN)
392698e4732SJonas Aaberg 		return 0;
393698e4732SJonas Aaberg 
394698e4732SJonas Aaberg 	spin_lock_irqsave(&d40c->base->lcla_pool.lock, flags);
395698e4732SJonas Aaberg 
396698e4732SJonas Aaberg 	for (i = 1 ; i < D40_LCLA_LINK_PER_EVENT_GRP / 2; i++) {
397698e4732SJonas Aaberg 		if (d40c->base->lcla_pool.alloc_map[d40c->phy_chan->num *
398698e4732SJonas Aaberg 						    D40_LCLA_LINK_PER_EVENT_GRP + i] == d40d) {
399698e4732SJonas Aaberg 			d40c->base->lcla_pool.alloc_map[d40c->phy_chan->num *
400698e4732SJonas Aaberg 							D40_LCLA_LINK_PER_EVENT_GRP + i] = NULL;
401698e4732SJonas Aaberg 			d40d->lcla_alloc--;
402698e4732SJonas Aaberg 			if (d40d->lcla_alloc == 0) {
403698e4732SJonas Aaberg 				ret = 0;
404698e4732SJonas Aaberg 				break;
405698e4732SJonas Aaberg 			}
406698e4732SJonas Aaberg 		}
407698e4732SJonas Aaberg 	}
408698e4732SJonas Aaberg 
409698e4732SJonas Aaberg 	spin_unlock_irqrestore(&d40c->base->lcla_pool.lock, flags);
410698e4732SJonas Aaberg 
411698e4732SJonas Aaberg 	return ret;
412698e4732SJonas Aaberg 
413698e4732SJonas Aaberg }
414698e4732SJonas Aaberg 
/* Unlink the descriptor from whichever list currently holds it. */
4158d318a50SLinus Walleij static void d40_desc_remove(struct d40_desc *d40d)
4168d318a50SLinus Walleij {
4178d318a50SLinus Walleij 	list_del(&d40d->node);
4188d318a50SLinus Walleij }
4198d318a50SLinus Walleij 
4208d318a50SLinus Walleij static struct d40_desc *d40_desc_get(struct d40_chan *d40c)
4218d318a50SLinus Walleij {
4228d318a50SLinus Walleij 	struct d40_desc *d;
4238d318a50SLinus Walleij 	struct d40_desc *_d;
4248d318a50SLinus Walleij 
4258d318a50SLinus Walleij 	if (!list_empty(&d40c->client)) {
4268d318a50SLinus Walleij 		list_for_each_entry_safe(d, _d, &d40c->client, node)
4278d318a50SLinus Walleij 			if (async_tx_test_ack(&d->txd)) {
4288d318a50SLinus Walleij 				d40_pool_lli_free(d);
4298d318a50SLinus Walleij 				d40_desc_remove(d);
430c675b1b4SJonas Aaberg 				break;
4318d318a50SLinus Walleij 			}
4328d318a50SLinus Walleij 	} else {
433c675b1b4SJonas Aaberg 		d = kmem_cache_alloc(d40c->base->desc_slab, GFP_NOWAIT);
434c675b1b4SJonas Aaberg 		if (d != NULL) {
435c675b1b4SJonas Aaberg 			memset(d, 0, sizeof(struct d40_desc));
436c675b1b4SJonas Aaberg 			INIT_LIST_HEAD(&d->node);
4378d318a50SLinus Walleij 		}
438c675b1b4SJonas Aaberg 	}
439c675b1b4SJonas Aaberg 	return d;
4408d318a50SLinus Walleij }
4418d318a50SLinus Walleij 
/*
 * Return all LCLA slots held by the job and free the descriptor back
 * to the slab cache.
 */
4428d318a50SLinus Walleij static void d40_desc_free(struct d40_chan *d40c, struct d40_desc *d40d)
4438d318a50SLinus Walleij {
444698e4732SJonas Aaberg 
445698e4732SJonas Aaberg 	d40_lcla_free_all(d40c, d40d);
446c675b1b4SJonas Aaberg 	kmem_cache_free(d40c->base->desc_slab, d40d);
4478d318a50SLinus Walleij }
4488d318a50SLinus Walleij 
/* Append the descriptor to the channel's active list. */
4498d318a50SLinus Walleij static void d40_desc_submit(struct d40_chan *d40c, struct d40_desc *desc)
4508d318a50SLinus Walleij {
4518d318a50SLinus Walleij 	list_add_tail(&desc->node, &d40c->active);
4528d318a50SLinus Walleij }
4538d318a50SLinus Walleij 
/*
 * Point the hardware at the descriptor's LLIs.  Physical channels get
 * the whole src/dst chain written to the channel registers in one go.
 * Logical channels get the first LLI pair written to LCPA, with as
 * many of the remaining pairs as LCLA slots allow linked in via LCLA;
 * lli_current tracks how far into the chain the hardware has been
 * pointed.
 */
454698e4732SJonas Aaberg static void d40_desc_load(struct d40_chan *d40c, struct d40_desc *d40d)
455698e4732SJonas Aaberg {
456698e4732SJonas Aaberg 	int curr_lcla = -EINVAL, next_lcla;
457698e4732SJonas Aaberg 
458698e4732SJonas Aaberg 	if (d40c->log_num == D40_PHY_CHAN) {
459698e4732SJonas Aaberg 		d40_phy_lli_write(d40c->base->virtbase,
460698e4732SJonas Aaberg 				  d40c->phy_chan->num,
461698e4732SJonas Aaberg 				  d40d->lli_phy.dst,
462698e4732SJonas Aaberg 				  d40d->lli_phy.src);
463698e4732SJonas Aaberg 		d40d->lli_current = d40d->lli_len;
464698e4732SJonas Aaberg 	} else {
465698e4732SJonas Aaberg 
		/* Only link via LCLA if more than one LLI pair remains */
466698e4732SJonas Aaberg 		if ((d40d->lli_len - d40d->lli_current) > 1)
467698e4732SJonas Aaberg 			curr_lcla = d40_lcla_alloc_one(d40c, d40d);
468698e4732SJonas Aaberg 
469698e4732SJonas Aaberg 		d40_log_lli_lcpa_write(d40c->lcpa,
470698e4732SJonas Aaberg 				       &d40d->lli_log.dst[d40d->lli_current],
471698e4732SJonas Aaberg 				       &d40d->lli_log.src[d40d->lli_current],
472698e4732SJonas Aaberg 				       curr_lcla);
473698e4732SJonas Aaberg 
474698e4732SJonas Aaberg 		d40d->lli_current++;
475698e4732SJonas Aaberg 		for (; d40d->lli_current < d40d->lli_len; d40d->lli_current++) {
476698e4732SJonas Aaberg 			struct d40_log_lli *lcla;
477698e4732SJonas Aaberg 
478698e4732SJonas Aaberg 			if (d40d->lli_current + 1 < d40d->lli_len)
479698e4732SJonas Aaberg 				next_lcla = d40_lcla_alloc_one(d40c, d40d);
480698e4732SJonas Aaberg 			else
481698e4732SJonas Aaberg 				next_lcla = -EINVAL;
482698e4732SJonas Aaberg 
			/*
			 * Each phy channel owns 1024 bytes of LCLA; one link
			 * is two 8-byte d40_log_lli entries (src + dst).
			 *
			 * NOTE(review): if the d40_lcla_alloc_one() above
			 * failed, curr_lcla is -EINVAL here and this address
			 * is bogus — confirm allocation cannot fail when the
			 * loop is entered.
			 */
483698e4732SJonas Aaberg 			lcla = d40c->base->lcla_pool.base +
484698e4732SJonas Aaberg 				d40c->phy_chan->num * 1024 +
485698e4732SJonas Aaberg 				8 * curr_lcla * 2;
486698e4732SJonas Aaberg 
487698e4732SJonas Aaberg 			d40_log_lli_lcla_write(lcla,
488698e4732SJonas Aaberg 					       &d40d->lli_log.dst[d40d->lli_current],
489698e4732SJonas Aaberg 					       &d40d->lli_log.src[d40d->lli_current],
490698e4732SJonas Aaberg 					       next_lcla);
491698e4732SJonas Aaberg 
			/* Push the freshly written link out to memory */
492698e4732SJonas Aaberg 			(void) dma_map_single(d40c->base->dev, lcla,
493698e4732SJonas Aaberg 					      2 * sizeof(struct d40_log_lli),
494698e4732SJonas Aaberg 					      DMA_TO_DEVICE);
495698e4732SJonas Aaberg 
496698e4732SJonas Aaberg 			curr_lcla = next_lcla;
497698e4732SJonas Aaberg 
			/* Out of LCLA slots or chain complete: stop linking */
498698e4732SJonas Aaberg 			if (curr_lcla == -EINVAL) {
499698e4732SJonas Aaberg 				d40d->lli_current++;
500698e4732SJonas Aaberg 				break;
501698e4732SJonas Aaberg 			}
502698e4732SJonas Aaberg 
503698e4732SJonas Aaberg 		}
504698e4732SJonas Aaberg 	}
505698e4732SJonas Aaberg }
506698e4732SJonas Aaberg 
/* Peek at the first descriptor on the active list, or NULL if none. */
5078d318a50SLinus Walleij static struct d40_desc *d40_first_active_get(struct d40_chan *d40c)
5088d318a50SLinus Walleij {
5098d318a50SLinus Walleij 	struct d40_desc *d;
5108d318a50SLinus Walleij 
5118d318a50SLinus Walleij 	if (list_empty(&d40c->active))
5128d318a50SLinus Walleij 		return NULL;
5138d318a50SLinus Walleij 
5148d318a50SLinus Walleij 	d = list_first_entry(&d40c->active,
5158d318a50SLinus Walleij 			     struct d40_desc,
5168d318a50SLinus Walleij 			     node);
5178d318a50SLinus Walleij 	return d;
5188d318a50SLinus Walleij }
5198d318a50SLinus Walleij 
/* Append the descriptor to the channel's queue of pending jobs. */
5208d318a50SLinus Walleij static void d40_desc_queue(struct d40_chan *d40c, struct d40_desc *desc)
5218d318a50SLinus Walleij {
5228d318a50SLinus Walleij 	list_add_tail(&desc->node, &d40c->queue);
5238d318a50SLinus Walleij }
5248d318a50SLinus Walleij 
/* Peek at the first queued (not yet started) descriptor, or NULL. */
5258d318a50SLinus Walleij static struct d40_desc *d40_first_queued(struct d40_chan *d40c)
5268d318a50SLinus Walleij {
5278d318a50SLinus Walleij 	struct d40_desc *d;
5288d318a50SLinus Walleij 
5298d318a50SLinus Walleij 	if (list_empty(&d40c->queue))
5308d318a50SLinus Walleij 		return NULL;
5318d318a50SLinus Walleij 
5328d318a50SLinus Walleij 	d = list_first_entry(&d40c->queue,
5338d318a50SLinus Walleij 			     struct d40_desc,
5348d318a50SLinus Walleij 			     node);
5358d318a50SLinus Walleij 	return d;
5368d318a50SLinus Walleij }
5378d318a50SLinus Walleij 
538aa182ae2SJonas Aaberg static struct d40_desc *d40_last_queued(struct d40_chan *d40c)
539aa182ae2SJonas Aaberg {
540aa182ae2SJonas Aaberg 	struct d40_desc *d;
541aa182ae2SJonas Aaberg 
542aa182ae2SJonas Aaberg 	if (list_empty(&d40c->queue))
543aa182ae2SJonas Aaberg 		return NULL;
544aa182ae2SJonas Aaberg 	list_for_each_entry(d, &d40c->queue, node)
545aa182ae2SJonas Aaberg 		if (list_is_last(&d->node, &d40c->queue))
546aa182ae2SJonas Aaberg 			break;
547aa182ae2SJonas Aaberg 	return d;
548aa182ae2SJonas Aaberg }
549aa182ae2SJonas Aaberg 
5508d318a50SLinus Walleij /* Support functions for logical channels */
5518d318a50SLinus Walleij 
5528d318a50SLinus Walleij 
/*
 * Issue a command (STOP/RUN/SUSPEND_REQ) to the physical channel and,
 * for suspend requests, poll until the channel reports SUSPENDED or
 * STOP (up to D40_SUSPEND_MAX_IT iterations).  The ACTIVE/ACTIVO
 * registers are shared between several channels, hence execmd_lock.
 * Returns 0 on success, -EBUSY if the channel refuses to suspend.
 */
5538d318a50SLinus Walleij static int d40_channel_execute_command(struct d40_chan *d40c,
5548d318a50SLinus Walleij 				       enum d40_command command)
5558d318a50SLinus Walleij {
556767a9675SJonas Aaberg 	u32 status;
557767a9675SJonas Aaberg 	int i;
5588d318a50SLinus Walleij 	void __iomem *active_reg;
5598d318a50SLinus Walleij 	int ret = 0;
5608d318a50SLinus Walleij 	unsigned long flags;
5611d392a7bSJonas Aaberg 	u32 wmask;
5628d318a50SLinus Walleij 
5638d318a50SLinus Walleij 	spin_lock_irqsave(&d40c->base->execmd_lock, flags);
5648d318a50SLinus Walleij 
	/* Even-numbered channels live in ACTIVE, odd ones in ACTIVO */
5658d318a50SLinus Walleij 	if (d40c->phy_chan->num % 2 == 0)
5668d318a50SLinus Walleij 		active_reg = d40c->base->virtbase + D40_DREG_ACTIVE;
5678d318a50SLinus Walleij 	else
5688d318a50SLinus Walleij 		active_reg = d40c->base->virtbase + D40_DREG_ACTIVO;
5698d318a50SLinus Walleij 
	/* Already suspended/stopped channels need no suspend request */
5708d318a50SLinus Walleij 	if (command == D40_DMA_SUSPEND_REQ) {
5718d318a50SLinus Walleij 		status = (readl(active_reg) &
5728d318a50SLinus Walleij 			  D40_CHAN_POS_MASK(d40c->phy_chan->num)) >>
5738d318a50SLinus Walleij 			D40_CHAN_POS(d40c->phy_chan->num);
5748d318a50SLinus Walleij 
5758d318a50SLinus Walleij 		if (status == D40_DMA_SUSPENDED || status == D40_DMA_STOP)
5768d318a50SLinus Walleij 			goto done;
5778d318a50SLinus Walleij 	}
5788d318a50SLinus Walleij 
	/* Write the command into this channel's 2-bit field only */
5791d392a7bSJonas Aaberg 	wmask = 0xffffffff & ~(D40_CHAN_POS_MASK(d40c->phy_chan->num));
5801d392a7bSJonas Aaberg 	writel(wmask | (command << D40_CHAN_POS(d40c->phy_chan->num)),
5811d392a7bSJonas Aaberg 	       active_reg);
5828d318a50SLinus Walleij 
5838d318a50SLinus Walleij 	if (command == D40_DMA_SUSPEND_REQ) {
5848d318a50SLinus Walleij 
5858d318a50SLinus Walleij 		for (i = 0 ; i < D40_SUSPEND_MAX_IT; i++) {
5868d318a50SLinus Walleij 			status = (readl(active_reg) &
5878d318a50SLinus Walleij 				  D40_CHAN_POS_MASK(d40c->phy_chan->num)) >>
5888d318a50SLinus Walleij 				D40_CHAN_POS(d40c->phy_chan->num);
5898d318a50SLinus Walleij 
5908d318a50SLinus Walleij 			cpu_relax();
5918d318a50SLinus Walleij 			/*
5928d318a50SLinus Walleij 			 * Reduce the number of bus accesses while
5938d318a50SLinus Walleij 			 * waiting for the DMA to suspend.
5948d318a50SLinus Walleij 			 */
5958d318a50SLinus Walleij 			udelay(3);
5968d318a50SLinus Walleij 
5978d318a50SLinus Walleij 			if (status == D40_DMA_STOP ||
5988d318a50SLinus Walleij 			    status == D40_DMA_SUSPENDED)
5998d318a50SLinus Walleij 				break;
6008d318a50SLinus Walleij 		}
6018d318a50SLinus Walleij 
6028d318a50SLinus Walleij 		if (i == D40_SUSPEND_MAX_IT) {
6038d318a50SLinus Walleij 			dev_err(&d40c->chan.dev->device,
6048d318a50SLinus Walleij 				"[%s]: unable to suspend the chl %d (log: %d) status %x\n",
6058d318a50SLinus Walleij 				__func__, d40c->phy_chan->num, d40c->log_num,
6068d318a50SLinus Walleij 				status);
6078d318a50SLinus Walleij 			dump_stack();
6088d318a50SLinus Walleij 			ret = -EBUSY;
6098d318a50SLinus Walleij 		}
6108d318a50SLinus Walleij 
6118d318a50SLinus Walleij 	}
6128d318a50SLinus Walleij done:
6138d318a50SLinus Walleij 	spin_unlock_irqrestore(&d40c->base->execmd_lock, flags);
6148d318a50SLinus Walleij 	return ret;
6158d318a50SLinus Walleij }
6168d318a50SLinus Walleij 
6178d318a50SLinus Walleij static void d40_term_all(struct d40_chan *d40c)
6188d318a50SLinus Walleij {
6198d318a50SLinus Walleij 	struct d40_desc *d40d;
6208d318a50SLinus Walleij 
6218d318a50SLinus Walleij 	/* Release active descriptors */
6228d318a50SLinus Walleij 	while ((d40d = d40_first_active_get(d40c))) {
6238d318a50SLinus Walleij 		d40_desc_remove(d40d);
6248d318a50SLinus Walleij 		d40_desc_free(d40c, d40d);
6258d318a50SLinus Walleij 	}
6268d318a50SLinus Walleij 
6278d318a50SLinus Walleij 	/* Release queued descriptors waiting for transfer */
6288d318a50SLinus Walleij 	while ((d40d = d40_first_queued(d40c))) {
6298d318a50SLinus Walleij 		d40_desc_remove(d40d);
6308d318a50SLinus Walleij 		d40_desc_free(d40c, d40d);
6318d318a50SLinus Walleij 	}
6328d318a50SLinus Walleij 
6338d318a50SLinus Walleij 
6348d318a50SLinus Walleij 	d40c->pending_tx = 0;
6358d318a50SLinus Walleij 	d40c->busy = false;
6368d318a50SLinus Walleij }
6378d318a50SLinus Walleij 
6388d318a50SLinus Walleij static void d40_config_set_event(struct d40_chan *d40c, bool do_enable)
6398d318a50SLinus Walleij {
6408d318a50SLinus Walleij 	u32 val;
6418d318a50SLinus Walleij 	unsigned long flags;
6428d318a50SLinus Walleij 
6430c32269dSJonas Aaberg 	/* Notice, that disable requires the physical channel to be stopped */
6448d318a50SLinus Walleij 	if (do_enable)
6458d318a50SLinus Walleij 		val = D40_ACTIVATE_EVENTLINE;
6468d318a50SLinus Walleij 	else
6478d318a50SLinus Walleij 		val = D40_DEACTIVATE_EVENTLINE;
6488d318a50SLinus Walleij 
6498d318a50SLinus Walleij 	spin_lock_irqsave(&d40c->phy_chan->lock, flags);
6508d318a50SLinus Walleij 
6518d318a50SLinus Walleij 	/* Enable event line connected to device (or memcpy) */
6528d318a50SLinus Walleij 	if ((d40c->dma_cfg.dir ==  STEDMA40_PERIPH_TO_MEM) ||
6538d318a50SLinus Walleij 	    (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_PERIPH)) {
6548d318a50SLinus Walleij 		u32 event = D40_TYPE_TO_EVENT(d40c->dma_cfg.src_dev_type);
6558d318a50SLinus Walleij 
6568d318a50SLinus Walleij 		writel((val << D40_EVENTLINE_POS(event)) |
6578d318a50SLinus Walleij 		       ~D40_EVENTLINE_MASK(event),
6588d318a50SLinus Walleij 		       d40c->base->virtbase + D40_DREG_PCBASE +
6598d318a50SLinus Walleij 		       d40c->phy_chan->num * D40_DREG_PCDELTA +
6608d318a50SLinus Walleij 		       D40_CHAN_REG_SSLNK);
6618d318a50SLinus Walleij 	}
6628d318a50SLinus Walleij 	if (d40c->dma_cfg.dir !=  STEDMA40_PERIPH_TO_MEM) {
6638d318a50SLinus Walleij 		u32 event = D40_TYPE_TO_EVENT(d40c->dma_cfg.dst_dev_type);
6648d318a50SLinus Walleij 
6658d318a50SLinus Walleij 		writel((val << D40_EVENTLINE_POS(event)) |
6668d318a50SLinus Walleij 		       ~D40_EVENTLINE_MASK(event),
6678d318a50SLinus Walleij 		       d40c->base->virtbase + D40_DREG_PCBASE +
6688d318a50SLinus Walleij 		       d40c->phy_chan->num * D40_DREG_PCDELTA +
6698d318a50SLinus Walleij 		       D40_CHAN_REG_SDLNK);
6708d318a50SLinus Walleij 	}
6718d318a50SLinus Walleij 
6728d318a50SLinus Walleij 	spin_unlock_irqrestore(&d40c->phy_chan->lock, flags);
6738d318a50SLinus Walleij }
6748d318a50SLinus Walleij 
675a5ebca47SJonas Aaberg static u32 d40_chan_has_events(struct d40_chan *d40c)
6768d318a50SLinus Walleij {
677be8cb7dfSJonas Aaberg 	u32 val;
6788d318a50SLinus Walleij 
6798d318a50SLinus Walleij 	val = readl(d40c->base->virtbase + D40_DREG_PCBASE +
6808d318a50SLinus Walleij 		    d40c->phy_chan->num * D40_DREG_PCDELTA +
6818d318a50SLinus Walleij 		    D40_CHAN_REG_SSLNK);
6828d318a50SLinus Walleij 
683be8cb7dfSJonas Aaberg 	val |= readl(d40c->base->virtbase + D40_DREG_PCBASE +
6848d318a50SLinus Walleij 		     d40c->phy_chan->num * D40_DREG_PCDELTA +
6858d318a50SLinus Walleij 		     D40_CHAN_REG_SDLNK);
686a5ebca47SJonas Aaberg 	return val;
6878d318a50SLinus Walleij }
6888d318a50SLinus Walleij 
689b55912c6SJonas Aaberg static void d40_config_write(struct d40_chan *d40c)
6908d318a50SLinus Walleij {
6918d318a50SLinus Walleij 	u32 addr_base;
6928d318a50SLinus Walleij 	u32 var;
6938d318a50SLinus Walleij 
6948d318a50SLinus Walleij 	/* Odd addresses are even addresses + 4 */
6958d318a50SLinus Walleij 	addr_base = (d40c->phy_chan->num % 2) * 4;
6968d318a50SLinus Walleij 	/* Setup channel mode to logical or physical */
6978d318a50SLinus Walleij 	var = ((u32)(d40c->log_num != D40_PHY_CHAN) + 1) <<
6988d318a50SLinus Walleij 		D40_CHAN_POS(d40c->phy_chan->num);
6998d318a50SLinus Walleij 	writel(var, d40c->base->virtbase + D40_DREG_PRMSE + addr_base);
7008d318a50SLinus Walleij 
7018d318a50SLinus Walleij 	/* Setup operational mode option register */
7028d318a50SLinus Walleij 	var = ((d40c->dma_cfg.channel_type >> STEDMA40_INFO_CH_MODE_OPT_POS) &
7038d318a50SLinus Walleij 	       0x3) << D40_CHAN_POS(d40c->phy_chan->num);
7048d318a50SLinus Walleij 
7058d318a50SLinus Walleij 	writel(var, d40c->base->virtbase + D40_DREG_PRMOE + addr_base);
7068d318a50SLinus Walleij 
7078d318a50SLinus Walleij 	if (d40c->log_num != D40_PHY_CHAN) {
7088d318a50SLinus Walleij 		/* Set default config for CFG reg */
7098d318a50SLinus Walleij 		writel(d40c->src_def_cfg,
7108d318a50SLinus Walleij 		       d40c->base->virtbase + D40_DREG_PCBASE +
7118d318a50SLinus Walleij 		       d40c->phy_chan->num * D40_DREG_PCDELTA +
7128d318a50SLinus Walleij 		       D40_CHAN_REG_SSCFG);
7138d318a50SLinus Walleij 		writel(d40c->dst_def_cfg,
7148d318a50SLinus Walleij 		       d40c->base->virtbase + D40_DREG_PCBASE +
7158d318a50SLinus Walleij 		       d40c->phy_chan->num * D40_DREG_PCDELTA +
7168d318a50SLinus Walleij 		       D40_CHAN_REG_SDCFG);
7178d318a50SLinus Walleij 
718b55912c6SJonas Aaberg 		/* Set LIDX for lcla */
719b55912c6SJonas Aaberg 		writel((d40c->phy_chan->num << D40_SREG_ELEM_LOG_LIDX_POS) &
720b55912c6SJonas Aaberg 		       D40_SREG_ELEM_LOG_LIDX_MASK,
721b55912c6SJonas Aaberg 		       d40c->base->virtbase + D40_DREG_PCBASE +
722b55912c6SJonas Aaberg 		       d40c->phy_chan->num * D40_DREG_PCDELTA +
723b55912c6SJonas Aaberg 		       D40_CHAN_REG_SDELT);
724b55912c6SJonas Aaberg 
725b55912c6SJonas Aaberg 		writel((d40c->phy_chan->num << D40_SREG_ELEM_LOG_LIDX_POS) &
726b55912c6SJonas Aaberg 		       D40_SREG_ELEM_LOG_LIDX_MASK,
727b55912c6SJonas Aaberg 		       d40c->base->virtbase + D40_DREG_PCBASE +
728b55912c6SJonas Aaberg 		       d40c->phy_chan->num * D40_DREG_PCDELTA +
729b55912c6SJonas Aaberg 		       D40_CHAN_REG_SSELT);
730b55912c6SJonas Aaberg 
7318d318a50SLinus Walleij 	}
7328d318a50SLinus Walleij }
7338d318a50SLinus Walleij 
734aa182ae2SJonas Aaberg static u32 d40_residue(struct d40_chan *d40c)
735aa182ae2SJonas Aaberg {
736aa182ae2SJonas Aaberg 	u32 num_elt;
737aa182ae2SJonas Aaberg 
738aa182ae2SJonas Aaberg 	if (d40c->log_num != D40_PHY_CHAN)
739aa182ae2SJonas Aaberg 		num_elt = (readl(&d40c->lcpa->lcsp2) & D40_MEM_LCSP2_ECNT_MASK)
740aa182ae2SJonas Aaberg 			>> D40_MEM_LCSP2_ECNT_POS;
741aa182ae2SJonas Aaberg 	else
742aa182ae2SJonas Aaberg 		num_elt = (readl(d40c->base->virtbase + D40_DREG_PCBASE +
743aa182ae2SJonas Aaberg 				 d40c->phy_chan->num * D40_DREG_PCDELTA +
744aa182ae2SJonas Aaberg 				 D40_CHAN_REG_SDELT) &
745aa182ae2SJonas Aaberg 			   D40_SREG_ELEM_PHY_ECNT_MASK) >>
746aa182ae2SJonas Aaberg 			D40_SREG_ELEM_PHY_ECNT_POS;
747aa182ae2SJonas Aaberg 	return num_elt * (1 << d40c->dma_cfg.dst_info.data_width);
748aa182ae2SJonas Aaberg }
749aa182ae2SJonas Aaberg 
750aa182ae2SJonas Aaberg static bool d40_tx_is_linked(struct d40_chan *d40c)
751aa182ae2SJonas Aaberg {
752aa182ae2SJonas Aaberg 	bool is_link;
753aa182ae2SJonas Aaberg 
754aa182ae2SJonas Aaberg 	if (d40c->log_num != D40_PHY_CHAN)
755aa182ae2SJonas Aaberg 		is_link = readl(&d40c->lcpa->lcsp3) &  D40_MEM_LCSP3_DLOS_MASK;
756aa182ae2SJonas Aaberg 	else
757aa182ae2SJonas Aaberg 		is_link = readl(d40c->base->virtbase + D40_DREG_PCBASE +
758aa182ae2SJonas Aaberg 				d40c->phy_chan->num * D40_DREG_PCDELTA +
759aa182ae2SJonas Aaberg 				D40_CHAN_REG_SDLNK) &
760aa182ae2SJonas Aaberg 			D40_SREG_LNK_PHYS_LNK_MASK;
761aa182ae2SJonas Aaberg 	return is_link;
762aa182ae2SJonas Aaberg }
763aa182ae2SJonas Aaberg 
764aa182ae2SJonas Aaberg static int d40_pause(struct dma_chan *chan)
765aa182ae2SJonas Aaberg {
766aa182ae2SJonas Aaberg 	struct d40_chan *d40c =
767aa182ae2SJonas Aaberg 		container_of(chan, struct d40_chan, chan);
768aa182ae2SJonas Aaberg 	int res = 0;
769aa182ae2SJonas Aaberg 	unsigned long flags;
770aa182ae2SJonas Aaberg 
7713ac012afSJonas Aaberg 	if (!d40c->busy)
7723ac012afSJonas Aaberg 		return 0;
7733ac012afSJonas Aaberg 
774aa182ae2SJonas Aaberg 	spin_lock_irqsave(&d40c->lock, flags);
775aa182ae2SJonas Aaberg 
776aa182ae2SJonas Aaberg 	res = d40_channel_execute_command(d40c, D40_DMA_SUSPEND_REQ);
777aa182ae2SJonas Aaberg 	if (res == 0) {
778aa182ae2SJonas Aaberg 		if (d40c->log_num != D40_PHY_CHAN) {
779aa182ae2SJonas Aaberg 			d40_config_set_event(d40c, false);
780aa182ae2SJonas Aaberg 			/* Resume the other logical channels if any */
781aa182ae2SJonas Aaberg 			if (d40_chan_has_events(d40c))
782aa182ae2SJonas Aaberg 				res = d40_channel_execute_command(d40c,
783aa182ae2SJonas Aaberg 								  D40_DMA_RUN);
784aa182ae2SJonas Aaberg 		}
785aa182ae2SJonas Aaberg 	}
786aa182ae2SJonas Aaberg 
787aa182ae2SJonas Aaberg 	spin_unlock_irqrestore(&d40c->lock, flags);
788aa182ae2SJonas Aaberg 	return res;
789aa182ae2SJonas Aaberg }
790aa182ae2SJonas Aaberg 
791aa182ae2SJonas Aaberg static int d40_resume(struct dma_chan *chan)
792aa182ae2SJonas Aaberg {
793aa182ae2SJonas Aaberg 	struct d40_chan *d40c =
794aa182ae2SJonas Aaberg 		container_of(chan, struct d40_chan, chan);
795aa182ae2SJonas Aaberg 	int res = 0;
796aa182ae2SJonas Aaberg 	unsigned long flags;
797aa182ae2SJonas Aaberg 
7983ac012afSJonas Aaberg 	if (!d40c->busy)
7993ac012afSJonas Aaberg 		return 0;
8003ac012afSJonas Aaberg 
801aa182ae2SJonas Aaberg 	spin_lock_irqsave(&d40c->lock, flags);
802aa182ae2SJonas Aaberg 
803aa182ae2SJonas Aaberg 	if (d40c->base->rev == 0)
804aa182ae2SJonas Aaberg 		if (d40c->log_num != D40_PHY_CHAN) {
805aa182ae2SJonas Aaberg 			res = d40_channel_execute_command(d40c,
806aa182ae2SJonas Aaberg 							  D40_DMA_SUSPEND_REQ);
807aa182ae2SJonas Aaberg 			goto no_suspend;
808aa182ae2SJonas Aaberg 		}
809aa182ae2SJonas Aaberg 
810aa182ae2SJonas Aaberg 	/* If bytes left to transfer or linked tx resume job */
811aa182ae2SJonas Aaberg 	if (d40_residue(d40c) || d40_tx_is_linked(d40c)) {
812aa182ae2SJonas Aaberg 
813aa182ae2SJonas Aaberg 		if (d40c->log_num != D40_PHY_CHAN)
814aa182ae2SJonas Aaberg 			d40_config_set_event(d40c, true);
815aa182ae2SJonas Aaberg 
816aa182ae2SJonas Aaberg 		res = d40_channel_execute_command(d40c, D40_DMA_RUN);
817aa182ae2SJonas Aaberg 	}
818aa182ae2SJonas Aaberg 
819aa182ae2SJonas Aaberg no_suspend:
820aa182ae2SJonas Aaberg 	spin_unlock_irqrestore(&d40c->lock, flags);
821aa182ae2SJonas Aaberg 	return res;
822aa182ae2SJonas Aaberg }
823aa182ae2SJonas Aaberg 
/*
 * Hardware-link a newly submitted logical-channel job at submit time.
 * Not implemented yet: logical jobs are never marked is_hw_linked here,
 * so they are always started via d40_queue_start()/d40_desc_load().
 */
static void d40_tx_submit_log(struct d40_chan *d40c, struct d40_desc *d40d)
{
	/* TODO: Write */
}
828aa182ae2SJonas Aaberg 
/*
 * Try to hardware-link a newly submitted physical-channel job behind
 * the last queued job (or, if the queue is empty, the currently active
 * one), so the DMAC flows into the new job without CPU intervention.
 *
 * Sets d40d->is_hw_linked when the link is made; otherwise the job is
 * started later from d40_queue_start(). Called from d40_tx_submit()
 * with the channel lock held and the channel paused.
 */
static void d40_tx_submit_phy(struct d40_chan *d40c, struct d40_desc *d40d)
{
	struct d40_desc *d40d_prev = NULL;
	int i;
	u32 val;

	/* Pick the job to link behind: last queued, else first active */
	if (!list_empty(&d40c->queue))
		d40d_prev = d40_last_queued(d40c);
	else if (!list_empty(&d40c->active))
		d40d_prev = d40_first_active_get(d40c);

	if (!d40d_prev)
		return;

	/* Here we try to join this job with previous jobs */
	val = readl(d40c->base->virtbase + D40_DREG_PCBASE +
		    d40c->phy_chan->num * D40_DREG_PCDELTA +
		    D40_CHAN_REG_SSLNK);

	/* Figure out which link we're currently transmitting */
	for (i = 0; i < d40d_prev->lli_len; i++)
		if (val == d40d_prev->lli_phy.src[i].reg_lnk)
			break;

	/* Element count (ECNT) of the element in flight; > 0 means the
	 * hardware is still transferring it */
	val = readl(d40c->base->virtbase + D40_DREG_PCBASE +
		    d40c->phy_chan->num * D40_DREG_PCDELTA +
		    D40_CHAN_REG_SSELT) >> D40_SREG_ELEM_LOG_ECNT_POS;

	if (i == (d40d_prev->lli_len - 1) && val > 0) {
		/* On the last lli and still busy: change the current link
		 * registers to point straight at the new job */
		writel(virt_to_phys(d40d->lli_phy.src),
		       d40c->base->virtbase + D40_DREG_PCBASE +
		       d40c->phy_chan->num * D40_DREG_PCDELTA +
		       D40_CHAN_REG_SSLNK);
		writel(virt_to_phys(d40d->lli_phy.dst),
		       d40c->base->virtbase + D40_DREG_PCBASE +
		       d40c->phy_chan->num * D40_DREG_PCDELTA +
		       D40_CHAN_REG_SDLNK);

		d40d->is_hw_linked = true;

	} else if (i < d40d_prev->lli_len) {
		/* Mid-chain: patch the previous job's last lli to chain
		 * into this job. Unmap so the CPU writes land, then
		 * re-map for the device. */
		(void) dma_unmap_single(d40c->base->dev,
					virt_to_phys(d40d_prev->lli_phy.src),
					d40d_prev->lli_pool.size,
					DMA_TO_DEVICE);

		/* Keep the settings */
		val = d40d_prev->lli_phy.src[d40d_prev->lli_len - 1].reg_lnk &
			~D40_SREG_LNK_PHYS_LNK_MASK;
		d40d_prev->lli_phy.src[d40d_prev->lli_len - 1].reg_lnk =
			val | virt_to_phys(d40d->lli_phy.src);

		val = d40d_prev->lli_phy.dst[d40d_prev->lli_len - 1].reg_lnk &
			~D40_SREG_LNK_PHYS_LNK_MASK;
		d40d_prev->lli_phy.dst[d40d_prev->lli_len - 1].reg_lnk =
			val | virt_to_phys(d40d->lli_phy.dst);

		(void) dma_map_single(d40c->base->dev,
				      d40d_prev->lli_phy.src,
				      d40d_prev->lli_pool.size,
				      DMA_TO_DEVICE);
		d40d->is_hw_linked = true;
	}
}
894aa182ae2SJonas Aaberg 
8958d318a50SLinus Walleij static dma_cookie_t d40_tx_submit(struct dma_async_tx_descriptor *tx)
8968d318a50SLinus Walleij {
8978d318a50SLinus Walleij 	struct d40_chan *d40c = container_of(tx->chan,
8988d318a50SLinus Walleij 					     struct d40_chan,
8998d318a50SLinus Walleij 					     chan);
9008d318a50SLinus Walleij 	struct d40_desc *d40d = container_of(tx, struct d40_desc, txd);
9018d318a50SLinus Walleij 	unsigned long flags;
9028d318a50SLinus Walleij 
903aa182ae2SJonas Aaberg 	(void) d40_pause(&d40c->chan);
904aa182ae2SJonas Aaberg 
9058d318a50SLinus Walleij 	spin_lock_irqsave(&d40c->lock, flags);
9068d318a50SLinus Walleij 
907aa182ae2SJonas Aaberg 	d40c->chan.cookie++;
908aa182ae2SJonas Aaberg 
909aa182ae2SJonas Aaberg 	if (d40c->chan.cookie < 0)
910aa182ae2SJonas Aaberg 		d40c->chan.cookie = 1;
911aa182ae2SJonas Aaberg 
912aa182ae2SJonas Aaberg 	d40d->txd.cookie = d40c->chan.cookie;
913aa182ae2SJonas Aaberg 
914aa182ae2SJonas Aaberg 	if (d40c->log_num == D40_PHY_CHAN)
915aa182ae2SJonas Aaberg 		d40_tx_submit_phy(d40c, d40d);
916aa182ae2SJonas Aaberg 	else
917aa182ae2SJonas Aaberg 		d40_tx_submit_log(d40c, d40d);
9188d318a50SLinus Walleij 
9198d318a50SLinus Walleij 	d40_desc_queue(d40c, d40d);
9208d318a50SLinus Walleij 
9218d318a50SLinus Walleij 	spin_unlock_irqrestore(&d40c->lock, flags);
9228d318a50SLinus Walleij 
923aa182ae2SJonas Aaberg 	(void) d40_resume(&d40c->chan);
924aa182ae2SJonas Aaberg 
9258d318a50SLinus Walleij 	return tx->cookie;
9268d318a50SLinus Walleij }
9278d318a50SLinus Walleij 
9288d318a50SLinus Walleij static int d40_start(struct d40_chan *d40c)
9298d318a50SLinus Walleij {
930f4185592SLinus Walleij 	if (d40c->base->rev == 0) {
931f4185592SLinus Walleij 		int err;
932f4185592SLinus Walleij 
933f4185592SLinus Walleij 		if (d40c->log_num != D40_PHY_CHAN) {
934f4185592SLinus Walleij 			err = d40_channel_execute_command(d40c,
935f4185592SLinus Walleij 							  D40_DMA_SUSPEND_REQ);
936f4185592SLinus Walleij 			if (err)
937f4185592SLinus Walleij 				return err;
938f4185592SLinus Walleij 		}
939f4185592SLinus Walleij 	}
940f4185592SLinus Walleij 
9410c32269dSJonas Aaberg 	if (d40c->log_num != D40_PHY_CHAN)
9428d318a50SLinus Walleij 		d40_config_set_event(d40c, true);
9438d318a50SLinus Walleij 
9440c32269dSJonas Aaberg 	return d40_channel_execute_command(d40c, D40_DMA_RUN);
9458d318a50SLinus Walleij }
9468d318a50SLinus Walleij 
9478d318a50SLinus Walleij static struct d40_desc *d40_queue_start(struct d40_chan *d40c)
9488d318a50SLinus Walleij {
9498d318a50SLinus Walleij 	struct d40_desc *d40d;
9508d318a50SLinus Walleij 	int err;
9518d318a50SLinus Walleij 
9528d318a50SLinus Walleij 	/* Start queued jobs, if any */
9538d318a50SLinus Walleij 	d40d = d40_first_queued(d40c);
9548d318a50SLinus Walleij 
9558d318a50SLinus Walleij 	if (d40d != NULL) {
9568d318a50SLinus Walleij 		d40c->busy = true;
9578d318a50SLinus Walleij 
9588d318a50SLinus Walleij 		/* Remove from queue */
9598d318a50SLinus Walleij 		d40_desc_remove(d40d);
9608d318a50SLinus Walleij 
9618d318a50SLinus Walleij 		/* Add to active queue */
9628d318a50SLinus Walleij 		d40_desc_submit(d40c, d40d);
9638d318a50SLinus Walleij 
964aa182ae2SJonas Aaberg 		/*
965aa182ae2SJonas Aaberg 		 * If this job is already linked in hw,
966aa182ae2SJonas Aaberg 		 * do not submit it.
967aa182ae2SJonas Aaberg 		 */
968698e4732SJonas Aaberg 
969aa182ae2SJonas Aaberg 		if (!d40d->is_hw_linked) {
9708d318a50SLinus Walleij 			/* Initiate DMA job */
9718d318a50SLinus Walleij 			d40_desc_load(d40c, d40d);
9728d318a50SLinus Walleij 
9738d318a50SLinus Walleij 			/* Start dma job */
9748d318a50SLinus Walleij 			err = d40_start(d40c);
9758d318a50SLinus Walleij 
9768d318a50SLinus Walleij 			if (err)
9778d318a50SLinus Walleij 				return NULL;
9788d318a50SLinus Walleij 		}
979aa182ae2SJonas Aaberg 	}
9808d318a50SLinus Walleij 
9818d318a50SLinus Walleij 	return d40d;
9828d318a50SLinus Walleij }
9838d318a50SLinus Walleij 
9848d318a50SLinus Walleij /* called from interrupt context */
9858d318a50SLinus Walleij static void dma_tc_handle(struct d40_chan *d40c)
9868d318a50SLinus Walleij {
9878d318a50SLinus Walleij 	struct d40_desc *d40d;
9888d318a50SLinus Walleij 
9898d318a50SLinus Walleij 	/* Get first active entry from list */
9908d318a50SLinus Walleij 	d40d = d40_first_active_get(d40c);
9918d318a50SLinus Walleij 
9928d318a50SLinus Walleij 	if (d40d == NULL)
9938d318a50SLinus Walleij 		return;
9948d318a50SLinus Walleij 
995698e4732SJonas Aaberg 	d40_lcla_free_all(d40c, d40d);
9968d318a50SLinus Walleij 
997698e4732SJonas Aaberg 	if (d40d->lli_current < d40d->lli_len) {
9988d318a50SLinus Walleij 		d40_desc_load(d40c, d40d);
9998d318a50SLinus Walleij 		/* Start dma job */
10008d318a50SLinus Walleij 		(void) d40_start(d40c);
10018d318a50SLinus Walleij 		return;
10028d318a50SLinus Walleij 	}
10038d318a50SLinus Walleij 
10048d318a50SLinus Walleij 	if (d40_queue_start(d40c) == NULL)
10058d318a50SLinus Walleij 		d40c->busy = false;
10068d318a50SLinus Walleij 
10078d318a50SLinus Walleij 	d40c->pending_tx++;
10088d318a50SLinus Walleij 	tasklet_schedule(&d40c->tasklet);
10098d318a50SLinus Walleij 
10108d318a50SLinus Walleij }
10118d318a50SLinus Walleij 
10128d318a50SLinus Walleij static void dma_tasklet(unsigned long data)
10138d318a50SLinus Walleij {
10148d318a50SLinus Walleij 	struct d40_chan *d40c = (struct d40_chan *) data;
1015767a9675SJonas Aaberg 	struct d40_desc *d40d;
10168d318a50SLinus Walleij 	unsigned long flags;
10178d318a50SLinus Walleij 	dma_async_tx_callback callback;
10188d318a50SLinus Walleij 	void *callback_param;
10198d318a50SLinus Walleij 
10208d318a50SLinus Walleij 	spin_lock_irqsave(&d40c->lock, flags);
10218d318a50SLinus Walleij 
10228d318a50SLinus Walleij 	/* Get first active entry from list */
1023767a9675SJonas Aaberg 	d40d = d40_first_active_get(d40c);
10248d318a50SLinus Walleij 
1025767a9675SJonas Aaberg 	if (d40d == NULL)
10268d318a50SLinus Walleij 		goto err;
10278d318a50SLinus Walleij 
1028767a9675SJonas Aaberg 	d40c->completed = d40d->txd.cookie;
10298d318a50SLinus Walleij 
10308d318a50SLinus Walleij 	/*
10318d318a50SLinus Walleij 	 * If terminating a channel pending_tx is set to zero.
10328d318a50SLinus Walleij 	 * This prevents any finished active jobs to return to the client.
10338d318a50SLinus Walleij 	 */
10348d318a50SLinus Walleij 	if (d40c->pending_tx == 0) {
10358d318a50SLinus Walleij 		spin_unlock_irqrestore(&d40c->lock, flags);
10368d318a50SLinus Walleij 		return;
10378d318a50SLinus Walleij 	}
10388d318a50SLinus Walleij 
10398d318a50SLinus Walleij 	/* Callback to client */
1040767a9675SJonas Aaberg 	callback = d40d->txd.callback;
1041767a9675SJonas Aaberg 	callback_param = d40d->txd.callback_param;
10428d318a50SLinus Walleij 
1043767a9675SJonas Aaberg 	if (async_tx_test_ack(&d40d->txd)) {
1044767a9675SJonas Aaberg 		d40_pool_lli_free(d40d);
1045767a9675SJonas Aaberg 		d40_desc_remove(d40d);
1046767a9675SJonas Aaberg 		d40_desc_free(d40c, d40d);
10478d318a50SLinus Walleij 	} else {
1048767a9675SJonas Aaberg 		if (!d40d->is_in_client_list) {
1049767a9675SJonas Aaberg 			d40_desc_remove(d40d);
1050698e4732SJonas Aaberg 			d40_lcla_free_all(d40c, d40d);
1051767a9675SJonas Aaberg 			list_add_tail(&d40d->node, &d40c->client);
1052767a9675SJonas Aaberg 			d40d->is_in_client_list = true;
10538d318a50SLinus Walleij 		}
10548d318a50SLinus Walleij 	}
10558d318a50SLinus Walleij 
10568d318a50SLinus Walleij 	d40c->pending_tx--;
10578d318a50SLinus Walleij 
10588d318a50SLinus Walleij 	if (d40c->pending_tx)
10598d318a50SLinus Walleij 		tasklet_schedule(&d40c->tasklet);
10608d318a50SLinus Walleij 
10618d318a50SLinus Walleij 	spin_unlock_irqrestore(&d40c->lock, flags);
10628d318a50SLinus Walleij 
1063767a9675SJonas Aaberg 	if (callback && (d40d->txd.flags & DMA_PREP_INTERRUPT))
10648d318a50SLinus Walleij 		callback(callback_param);
10658d318a50SLinus Walleij 
10668d318a50SLinus Walleij 	return;
10678d318a50SLinus Walleij 
10688d318a50SLinus Walleij  err:
10698d318a50SLinus Walleij 	/* Rescue manouver if receiving double interrupts */
10708d318a50SLinus Walleij 	if (d40c->pending_tx > 0)
10718d318a50SLinus Walleij 		d40c->pending_tx--;
10728d318a50SLinus Walleij 	spin_unlock_irqrestore(&d40c->lock, flags);
10738d318a50SLinus Walleij }
10748d318a50SLinus Walleij 
/*
 * Top-level interrupt handler. Reads all logical and physical TC/error
 * status registers, then for each pending bit acks it and dispatches
 * to the owning channel.
 */
static irqreturn_t d40_handle_interrupt(int irq, void *data)
{
	/*
	 * Maps each status register to its interrupt-clear register,
	 * whether it reports errors, and the first channel number it
	 * covers (D40_PHY_CHAN marks the physical-channel registers).
	 */
	static const struct d40_interrupt_lookup il[] = {
		{D40_DREG_LCTIS0, D40_DREG_LCICR0, false,  0},
		{D40_DREG_LCTIS1, D40_DREG_LCICR1, false, 32},
		{D40_DREG_LCTIS2, D40_DREG_LCICR2, false, 64},
		{D40_DREG_LCTIS3, D40_DREG_LCICR3, false, 96},
		{D40_DREG_LCEIS0, D40_DREG_LCICR0, true,   0},
		{D40_DREG_LCEIS1, D40_DREG_LCICR1, true,  32},
		{D40_DREG_LCEIS2, D40_DREG_LCICR2, true,  64},
		{D40_DREG_LCEIS3, D40_DREG_LCICR3, true,  96},
		{D40_DREG_PCTIS,  D40_DREG_PCICR,  false, D40_PHY_CHAN},
		{D40_DREG_PCEIS,  D40_DREG_PCICR,  true,  D40_PHY_CHAN},
	};

	int i;
	u32 regs[ARRAY_SIZE(il)];
	u32 idx;
	u32 row;
	long chan = -1;
	struct d40_chan *d40c;
	unsigned long flags;
	struct d40_base *base = data;

	spin_lock_irqsave(&base->interrupt_lock, flags);

	/* Read interrupt status of both logical and physical channels */
	for (i = 0; i < ARRAY_SIZE(il); i++)
		regs[i] = readl(base->virtbase + il[i].src);

	/* Treat all the status words as one long bit array and walk it */
	for (;;) {

		chan = find_next_bit((unsigned long *)regs,
				     BITS_PER_LONG * ARRAY_SIZE(il), chan + 1);

		/* No more set bits found? */
		if (chan == BITS_PER_LONG * ARRAY_SIZE(il))
			break;

		/* Translate the flat bit index back to (register, bit) */
		row = chan / BITS_PER_LONG;
		idx = chan & (BITS_PER_LONG - 1);

		/* ACK interrupt */
		writel(1 << idx, base->virtbase + il[row].clr);

		/*
		 * NOTE(review): an interrupt on a channel with no entry in
		 * the lookup tables would make d40c NULL here - verify the
		 * tables always cover every event line that can fire.
		 */
		if (il[row].offset == D40_PHY_CHAN)
			d40c = base->lookup_phy_chans[idx];
		else
			d40c = base->lookup_log_chans[il[row].offset + idx];
		spin_lock(&d40c->lock);

		if (!il[row].is_error)
			dma_tc_handle(d40c);
		else
			dev_err(base->dev,
				"[%s] IRQ chan: %ld offset %d idx %d\n",
				__func__, chan, il[row].offset, idx);

		spin_unlock(&d40c->lock);
	}

	spin_unlock_irqrestore(&base->interrupt_lock, flags);

	return IRQ_HANDLED;
}
11408d318a50SLinus Walleij 
11418d318a50SLinus Walleij static int d40_validate_conf(struct d40_chan *d40c,
11428d318a50SLinus Walleij 			     struct stedma40_chan_cfg *conf)
11438d318a50SLinus Walleij {
11448d318a50SLinus Walleij 	int res = 0;
11458d318a50SLinus Walleij 	u32 dst_event_group = D40_TYPE_TO_GROUP(conf->dst_dev_type);
11468d318a50SLinus Walleij 	u32 src_event_group = D40_TYPE_TO_GROUP(conf->src_dev_type);
11478d318a50SLinus Walleij 	bool is_log = (conf->channel_type & STEDMA40_CHANNEL_IN_OPER_MODE)
11488d318a50SLinus Walleij 		== STEDMA40_CHANNEL_IN_LOG_MODE;
11498d318a50SLinus Walleij 
11500747c7baSLinus Walleij 	if (!conf->dir) {
11510747c7baSLinus Walleij 		dev_err(&d40c->chan.dev->device, "[%s] Invalid direction.\n",
11520747c7baSLinus Walleij 			__func__);
11530747c7baSLinus Walleij 		res = -EINVAL;
11540747c7baSLinus Walleij 	}
11550747c7baSLinus Walleij 
11560747c7baSLinus Walleij 	if (conf->dst_dev_type != STEDMA40_DEV_DST_MEMORY &&
11570747c7baSLinus Walleij 	    d40c->base->plat_data->dev_tx[conf->dst_dev_type] == 0 &&
11580747c7baSLinus Walleij 	    d40c->runtime_addr == 0) {
11590747c7baSLinus Walleij 
11600747c7baSLinus Walleij 		dev_err(&d40c->chan.dev->device,
11610747c7baSLinus Walleij 			"[%s] Invalid TX channel address (%d)\n",
11620747c7baSLinus Walleij 			__func__, conf->dst_dev_type);
11630747c7baSLinus Walleij 		res = -EINVAL;
11640747c7baSLinus Walleij 	}
11650747c7baSLinus Walleij 
11660747c7baSLinus Walleij 	if (conf->src_dev_type != STEDMA40_DEV_SRC_MEMORY &&
11670747c7baSLinus Walleij 	    d40c->base->plat_data->dev_rx[conf->src_dev_type] == 0 &&
11680747c7baSLinus Walleij 	    d40c->runtime_addr == 0) {
11690747c7baSLinus Walleij 		dev_err(&d40c->chan.dev->device,
11700747c7baSLinus Walleij 			"[%s] Invalid RX channel address (%d)\n",
11710747c7baSLinus Walleij 			__func__, conf->src_dev_type);
11720747c7baSLinus Walleij 		res = -EINVAL;
11730747c7baSLinus Walleij 	}
11740747c7baSLinus Walleij 
11750747c7baSLinus Walleij 	if (conf->dir == STEDMA40_MEM_TO_PERIPH &&
11768d318a50SLinus Walleij 	    dst_event_group == STEDMA40_DEV_DST_MEMORY) {
11778d318a50SLinus Walleij 		dev_err(&d40c->chan.dev->device, "[%s] Invalid dst\n",
11788d318a50SLinus Walleij 			__func__);
11798d318a50SLinus Walleij 		res = -EINVAL;
11808d318a50SLinus Walleij 	}
11818d318a50SLinus Walleij 
11820747c7baSLinus Walleij 	if (conf->dir == STEDMA40_PERIPH_TO_MEM &&
11838d318a50SLinus Walleij 	    src_event_group == STEDMA40_DEV_SRC_MEMORY) {
11848d318a50SLinus Walleij 		dev_err(&d40c->chan.dev->device, "[%s] Invalid src\n",
11858d318a50SLinus Walleij 			__func__);
11868d318a50SLinus Walleij 		res = -EINVAL;
11878d318a50SLinus Walleij 	}
11888d318a50SLinus Walleij 
11898d318a50SLinus Walleij 	if (src_event_group == STEDMA40_DEV_SRC_MEMORY &&
11908d318a50SLinus Walleij 	    dst_event_group == STEDMA40_DEV_DST_MEMORY && is_log) {
11918d318a50SLinus Walleij 		dev_err(&d40c->chan.dev->device,
11928d318a50SLinus Walleij 			"[%s] No event line\n", __func__);
11938d318a50SLinus Walleij 		res = -EINVAL;
11948d318a50SLinus Walleij 	}
11958d318a50SLinus Walleij 
11968d318a50SLinus Walleij 	if (conf->dir == STEDMA40_PERIPH_TO_PERIPH &&
11978d318a50SLinus Walleij 	    (src_event_group != dst_event_group)) {
11988d318a50SLinus Walleij 		dev_err(&d40c->chan.dev->device,
11998d318a50SLinus Walleij 			"[%s] Invalid event group\n", __func__);
12008d318a50SLinus Walleij 		res = -EINVAL;
12018d318a50SLinus Walleij 	}
12028d318a50SLinus Walleij 
12038d318a50SLinus Walleij 	if (conf->dir == STEDMA40_PERIPH_TO_PERIPH) {
12048d318a50SLinus Walleij 		/*
12058d318a50SLinus Walleij 		 * DMAC HW supports it. Will be added to this driver,
12068d318a50SLinus Walleij 		 * in case any dma client requires it.
12078d318a50SLinus Walleij 		 */
12088d318a50SLinus Walleij 		dev_err(&d40c->chan.dev->device,
12098d318a50SLinus Walleij 			"[%s] periph to periph not supported\n",
12108d318a50SLinus Walleij 			__func__);
12118d318a50SLinus Walleij 		res = -EINVAL;
12128d318a50SLinus Walleij 	}
12138d318a50SLinus Walleij 
12148d318a50SLinus Walleij 	return res;
12158d318a50SLinus Walleij }
12168d318a50SLinus Walleij 
12178d318a50SLinus Walleij static bool d40_alloc_mask_set(struct d40_phy_res *phy, bool is_src,
12184aed79b2SMarcin Mielczarczyk 			       int log_event_line, bool is_log)
12198d318a50SLinus Walleij {
12208d318a50SLinus Walleij 	unsigned long flags;
12218d318a50SLinus Walleij 	spin_lock_irqsave(&phy->lock, flags);
12224aed79b2SMarcin Mielczarczyk 	if (!is_log) {
12238d318a50SLinus Walleij 		/* Physical interrupts are masked per physical full channel */
12248d318a50SLinus Walleij 		if (phy->allocated_src == D40_ALLOC_FREE &&
12258d318a50SLinus Walleij 		    phy->allocated_dst == D40_ALLOC_FREE) {
12268d318a50SLinus Walleij 			phy->allocated_dst = D40_ALLOC_PHY;
12278d318a50SLinus Walleij 			phy->allocated_src = D40_ALLOC_PHY;
12288d318a50SLinus Walleij 			goto found;
12298d318a50SLinus Walleij 		} else
12308d318a50SLinus Walleij 			goto not_found;
12318d318a50SLinus Walleij 	}
12328d318a50SLinus Walleij 
12338d318a50SLinus Walleij 	/* Logical channel */
12348d318a50SLinus Walleij 	if (is_src) {
12358d318a50SLinus Walleij 		if (phy->allocated_src == D40_ALLOC_PHY)
12368d318a50SLinus Walleij 			goto not_found;
12378d318a50SLinus Walleij 
12388d318a50SLinus Walleij 		if (phy->allocated_src == D40_ALLOC_FREE)
12398d318a50SLinus Walleij 			phy->allocated_src = D40_ALLOC_LOG_FREE;
12408d318a50SLinus Walleij 
12418d318a50SLinus Walleij 		if (!(phy->allocated_src & (1 << log_event_line))) {
12428d318a50SLinus Walleij 			phy->allocated_src |= 1 << log_event_line;
12438d318a50SLinus Walleij 			goto found;
12448d318a50SLinus Walleij 		} else
12458d318a50SLinus Walleij 			goto not_found;
12468d318a50SLinus Walleij 	} else {
12478d318a50SLinus Walleij 		if (phy->allocated_dst == D40_ALLOC_PHY)
12488d318a50SLinus Walleij 			goto not_found;
12498d318a50SLinus Walleij 
12508d318a50SLinus Walleij 		if (phy->allocated_dst == D40_ALLOC_FREE)
12518d318a50SLinus Walleij 			phy->allocated_dst = D40_ALLOC_LOG_FREE;
12528d318a50SLinus Walleij 
12538d318a50SLinus Walleij 		if (!(phy->allocated_dst & (1 << log_event_line))) {
12548d318a50SLinus Walleij 			phy->allocated_dst |= 1 << log_event_line;
12558d318a50SLinus Walleij 			goto found;
12568d318a50SLinus Walleij 		} else
12578d318a50SLinus Walleij 			goto not_found;
12588d318a50SLinus Walleij 	}
12598d318a50SLinus Walleij 
12608d318a50SLinus Walleij not_found:
12618d318a50SLinus Walleij 	spin_unlock_irqrestore(&phy->lock, flags);
12628d318a50SLinus Walleij 	return false;
12638d318a50SLinus Walleij found:
12648d318a50SLinus Walleij 	spin_unlock_irqrestore(&phy->lock, flags);
12658d318a50SLinus Walleij 	return true;
12668d318a50SLinus Walleij }
12678d318a50SLinus Walleij 
12688d318a50SLinus Walleij static bool d40_alloc_mask_free(struct d40_phy_res *phy, bool is_src,
12698d318a50SLinus Walleij 			       int log_event_line)
12708d318a50SLinus Walleij {
12718d318a50SLinus Walleij 	unsigned long flags;
12728d318a50SLinus Walleij 	bool is_free = false;
12738d318a50SLinus Walleij 
12748d318a50SLinus Walleij 	spin_lock_irqsave(&phy->lock, flags);
12758d318a50SLinus Walleij 	if (!log_event_line) {
12768d318a50SLinus Walleij 		phy->allocated_dst = D40_ALLOC_FREE;
12778d318a50SLinus Walleij 		phy->allocated_src = D40_ALLOC_FREE;
12788d318a50SLinus Walleij 		is_free = true;
12798d318a50SLinus Walleij 		goto out;
12808d318a50SLinus Walleij 	}
12818d318a50SLinus Walleij 
12828d318a50SLinus Walleij 	/* Logical channel */
12838d318a50SLinus Walleij 	if (is_src) {
12848d318a50SLinus Walleij 		phy->allocated_src &= ~(1 << log_event_line);
12858d318a50SLinus Walleij 		if (phy->allocated_src == D40_ALLOC_LOG_FREE)
12868d318a50SLinus Walleij 			phy->allocated_src = D40_ALLOC_FREE;
12878d318a50SLinus Walleij 	} else {
12888d318a50SLinus Walleij 		phy->allocated_dst &= ~(1 << log_event_line);
12898d318a50SLinus Walleij 		if (phy->allocated_dst == D40_ALLOC_LOG_FREE)
12908d318a50SLinus Walleij 			phy->allocated_dst = D40_ALLOC_FREE;
12918d318a50SLinus Walleij 	}
12928d318a50SLinus Walleij 
12938d318a50SLinus Walleij 	is_free = ((phy->allocated_src | phy->allocated_dst) ==
12948d318a50SLinus Walleij 		   D40_ALLOC_FREE);
12958d318a50SLinus Walleij 
12968d318a50SLinus Walleij out:
12978d318a50SLinus Walleij 	spin_unlock_irqrestore(&phy->lock, flags);
12988d318a50SLinus Walleij 
12998d318a50SLinus Walleij 	return is_free;
13008d318a50SLinus Walleij }
13018d318a50SLinus Walleij 
13028d318a50SLinus Walleij static int d40_allocate_channel(struct d40_chan *d40c)
13038d318a50SLinus Walleij {
13048d318a50SLinus Walleij 	int dev_type;
13058d318a50SLinus Walleij 	int event_group;
13068d318a50SLinus Walleij 	int event_line;
13078d318a50SLinus Walleij 	struct d40_phy_res *phys;
13088d318a50SLinus Walleij 	int i;
13098d318a50SLinus Walleij 	int j;
13108d318a50SLinus Walleij 	int log_num;
13118d318a50SLinus Walleij 	bool is_src;
1312508849adSLinus Walleij 	bool is_log = (d40c->dma_cfg.channel_type &
1313508849adSLinus Walleij 		       STEDMA40_CHANNEL_IN_OPER_MODE)
13148d318a50SLinus Walleij 		== STEDMA40_CHANNEL_IN_LOG_MODE;
13158d318a50SLinus Walleij 
13168d318a50SLinus Walleij 
13178d318a50SLinus Walleij 	phys = d40c->base->phy_res;
13188d318a50SLinus Walleij 
13198d318a50SLinus Walleij 	if (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_MEM) {
13208d318a50SLinus Walleij 		dev_type = d40c->dma_cfg.src_dev_type;
13218d318a50SLinus Walleij 		log_num = 2 * dev_type;
13228d318a50SLinus Walleij 		is_src = true;
13238d318a50SLinus Walleij 	} else if (d40c->dma_cfg.dir == STEDMA40_MEM_TO_PERIPH ||
13248d318a50SLinus Walleij 		   d40c->dma_cfg.dir == STEDMA40_MEM_TO_MEM) {
13258d318a50SLinus Walleij 		/* dst event lines are used for logical memcpy */
13268d318a50SLinus Walleij 		dev_type = d40c->dma_cfg.dst_dev_type;
13278d318a50SLinus Walleij 		log_num = 2 * dev_type + 1;
13288d318a50SLinus Walleij 		is_src = false;
13298d318a50SLinus Walleij 	} else
13308d318a50SLinus Walleij 		return -EINVAL;
13318d318a50SLinus Walleij 
13328d318a50SLinus Walleij 	event_group = D40_TYPE_TO_GROUP(dev_type);
13338d318a50SLinus Walleij 	event_line = D40_TYPE_TO_EVENT(dev_type);
13348d318a50SLinus Walleij 
13358d318a50SLinus Walleij 	if (!is_log) {
13368d318a50SLinus Walleij 		if (d40c->dma_cfg.dir == STEDMA40_MEM_TO_MEM) {
13378d318a50SLinus Walleij 			/* Find physical half channel */
13388d318a50SLinus Walleij 			for (i = 0; i < d40c->base->num_phy_chans; i++) {
13398d318a50SLinus Walleij 
13404aed79b2SMarcin Mielczarczyk 				if (d40_alloc_mask_set(&phys[i], is_src,
13414aed79b2SMarcin Mielczarczyk 						       0, is_log))
13428d318a50SLinus Walleij 					goto found_phy;
13438d318a50SLinus Walleij 			}
13448d318a50SLinus Walleij 		} else
13458d318a50SLinus Walleij 			for (j = 0; j < d40c->base->num_phy_chans; j += 8) {
13468d318a50SLinus Walleij 				int phy_num = j  + event_group * 2;
13478d318a50SLinus Walleij 				for (i = phy_num; i < phy_num + 2; i++) {
1348508849adSLinus Walleij 					if (d40_alloc_mask_set(&phys[i],
1349508849adSLinus Walleij 							       is_src,
1350508849adSLinus Walleij 							       0,
1351508849adSLinus Walleij 							       is_log))
13528d318a50SLinus Walleij 						goto found_phy;
13538d318a50SLinus Walleij 				}
13548d318a50SLinus Walleij 			}
13558d318a50SLinus Walleij 		return -EINVAL;
13568d318a50SLinus Walleij found_phy:
13578d318a50SLinus Walleij 		d40c->phy_chan = &phys[i];
13588d318a50SLinus Walleij 		d40c->log_num = D40_PHY_CHAN;
13598d318a50SLinus Walleij 		goto out;
13608d318a50SLinus Walleij 	}
13618d318a50SLinus Walleij 	if (dev_type == -1)
13628d318a50SLinus Walleij 		return -EINVAL;
13638d318a50SLinus Walleij 
13648d318a50SLinus Walleij 	/* Find logical channel */
13658d318a50SLinus Walleij 	for (j = 0; j < d40c->base->num_phy_chans; j += 8) {
13668d318a50SLinus Walleij 		int phy_num = j + event_group * 2;
13678d318a50SLinus Walleij 		/*
13688d318a50SLinus Walleij 		 * Spread logical channels across all available physical rather
13698d318a50SLinus Walleij 		 * than pack every logical channel at the first available phy
13708d318a50SLinus Walleij 		 * channels.
13718d318a50SLinus Walleij 		 */
13728d318a50SLinus Walleij 		if (is_src) {
13738d318a50SLinus Walleij 			for (i = phy_num; i < phy_num + 2; i++) {
13748d318a50SLinus Walleij 				if (d40_alloc_mask_set(&phys[i], is_src,
13754aed79b2SMarcin Mielczarczyk 						       event_line, is_log))
13768d318a50SLinus Walleij 					goto found_log;
13778d318a50SLinus Walleij 			}
13788d318a50SLinus Walleij 		} else {
13798d318a50SLinus Walleij 			for (i = phy_num + 1; i >= phy_num; i--) {
13808d318a50SLinus Walleij 				if (d40_alloc_mask_set(&phys[i], is_src,
13814aed79b2SMarcin Mielczarczyk 						       event_line, is_log))
13828d318a50SLinus Walleij 					goto found_log;
13838d318a50SLinus Walleij 			}
13848d318a50SLinus Walleij 		}
13858d318a50SLinus Walleij 	}
13868d318a50SLinus Walleij 	return -EINVAL;
13878d318a50SLinus Walleij 
13888d318a50SLinus Walleij found_log:
13898d318a50SLinus Walleij 	d40c->phy_chan = &phys[i];
13908d318a50SLinus Walleij 	d40c->log_num = log_num;
13918d318a50SLinus Walleij out:
13928d318a50SLinus Walleij 
13938d318a50SLinus Walleij 	if (is_log)
13948d318a50SLinus Walleij 		d40c->base->lookup_log_chans[d40c->log_num] = d40c;
13958d318a50SLinus Walleij 	else
13968d318a50SLinus Walleij 		d40c->base->lookup_phy_chans[d40c->phy_chan->num] = d40c;
13978d318a50SLinus Walleij 
13988d318a50SLinus Walleij 	return 0;
13998d318a50SLinus Walleij 
14008d318a50SLinus Walleij }
14018d318a50SLinus Walleij 
14028d318a50SLinus Walleij static int d40_config_memcpy(struct d40_chan *d40c)
14038d318a50SLinus Walleij {
14048d318a50SLinus Walleij 	dma_cap_mask_t cap = d40c->chan.device->cap_mask;
14058d318a50SLinus Walleij 
14068d318a50SLinus Walleij 	if (dma_has_cap(DMA_MEMCPY, cap) && !dma_has_cap(DMA_SLAVE, cap)) {
14078d318a50SLinus Walleij 		d40c->dma_cfg = *d40c->base->plat_data->memcpy_conf_log;
14088d318a50SLinus Walleij 		d40c->dma_cfg.src_dev_type = STEDMA40_DEV_SRC_MEMORY;
14098d318a50SLinus Walleij 		d40c->dma_cfg.dst_dev_type = d40c->base->plat_data->
14108d318a50SLinus Walleij 			memcpy[d40c->chan.chan_id];
14118d318a50SLinus Walleij 
14128d318a50SLinus Walleij 	} else if (dma_has_cap(DMA_MEMCPY, cap) &&
14138d318a50SLinus Walleij 		   dma_has_cap(DMA_SLAVE, cap)) {
14148d318a50SLinus Walleij 		d40c->dma_cfg = *d40c->base->plat_data->memcpy_conf_phy;
14158d318a50SLinus Walleij 	} else {
14168d318a50SLinus Walleij 		dev_err(&d40c->chan.dev->device, "[%s] No memcpy\n",
14178d318a50SLinus Walleij 			__func__);
14188d318a50SLinus Walleij 		return -EINVAL;
14198d318a50SLinus Walleij 	}
14208d318a50SLinus Walleij 
14218d318a50SLinus Walleij 	return 0;
14228d318a50SLinus Walleij }
14238d318a50SLinus Walleij 
14248d318a50SLinus Walleij 
14258d318a50SLinus Walleij static int d40_free_dma(struct d40_chan *d40c)
14268d318a50SLinus Walleij {
14278d318a50SLinus Walleij 
14288d318a50SLinus Walleij 	int res = 0;
1429d181b3a8SJonas Aaberg 	u32 event;
14308d318a50SLinus Walleij 	struct d40_phy_res *phy = d40c->phy_chan;
14318d318a50SLinus Walleij 	bool is_src;
1432a8be8627SPer Friden 	struct d40_desc *d;
1433a8be8627SPer Friden 	struct d40_desc *_d;
1434a8be8627SPer Friden 
14358d318a50SLinus Walleij 
14368d318a50SLinus Walleij 	/* Terminate all queued and active transfers */
14378d318a50SLinus Walleij 	d40_term_all(d40c);
14388d318a50SLinus Walleij 
1439a8be8627SPer Friden 	/* Release client owned descriptors */
1440a8be8627SPer Friden 	if (!list_empty(&d40c->client))
1441a8be8627SPer Friden 		list_for_each_entry_safe(d, _d, &d40c->client, node) {
1442a8be8627SPer Friden 			d40_pool_lli_free(d);
1443a8be8627SPer Friden 			d40_desc_remove(d);
1444a8be8627SPer Friden 			d40_desc_free(d40c, d);
1445a8be8627SPer Friden 		}
1446a8be8627SPer Friden 
14478d318a50SLinus Walleij 	if (phy == NULL) {
14488d318a50SLinus Walleij 		dev_err(&d40c->chan.dev->device, "[%s] phy == null\n",
14498d318a50SLinus Walleij 			__func__);
14508d318a50SLinus Walleij 		return -EINVAL;
14518d318a50SLinus Walleij 	}
14528d318a50SLinus Walleij 
14538d318a50SLinus Walleij 	if (phy->allocated_src == D40_ALLOC_FREE &&
14548d318a50SLinus Walleij 	    phy->allocated_dst == D40_ALLOC_FREE) {
14558d318a50SLinus Walleij 		dev_err(&d40c->chan.dev->device, "[%s] channel already free\n",
14568d318a50SLinus Walleij 			__func__);
14578d318a50SLinus Walleij 		return -EINVAL;
14588d318a50SLinus Walleij 	}
14598d318a50SLinus Walleij 
14608d318a50SLinus Walleij 	if (d40c->dma_cfg.dir == STEDMA40_MEM_TO_PERIPH ||
14618d318a50SLinus Walleij 	    d40c->dma_cfg.dir == STEDMA40_MEM_TO_MEM) {
14628d318a50SLinus Walleij 		event = D40_TYPE_TO_EVENT(d40c->dma_cfg.dst_dev_type);
14638d318a50SLinus Walleij 		is_src = false;
14648d318a50SLinus Walleij 	} else if (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_MEM) {
14658d318a50SLinus Walleij 		event = D40_TYPE_TO_EVENT(d40c->dma_cfg.src_dev_type);
14668d318a50SLinus Walleij 		is_src = true;
14678d318a50SLinus Walleij 	} else {
14688d318a50SLinus Walleij 		dev_err(&d40c->chan.dev->device,
14698d318a50SLinus Walleij 			"[%s] Unknown direction\n", __func__);
14708d318a50SLinus Walleij 		return -EINVAL;
14718d318a50SLinus Walleij 	}
14728d318a50SLinus Walleij 
1473d181b3a8SJonas Aaberg 	res = d40_channel_execute_command(d40c, D40_DMA_SUSPEND_REQ);
1474d181b3a8SJonas Aaberg 	if (res) {
1475d181b3a8SJonas Aaberg 		dev_err(&d40c->chan.dev->device, "[%s] suspend failed\n",
1476d181b3a8SJonas Aaberg 			__func__);
1477d181b3a8SJonas Aaberg 		return res;
1478d181b3a8SJonas Aaberg 	}
14798d318a50SLinus Walleij 
1480d181b3a8SJonas Aaberg 	if (d40c->log_num != D40_PHY_CHAN) {
1481d181b3a8SJonas Aaberg 		/* Release logical channel, deactivate the event line */
1482d181b3a8SJonas Aaberg 
1483d181b3a8SJonas Aaberg 		d40_config_set_event(d40c, false);
14848d318a50SLinus Walleij 		d40c->base->lookup_log_chans[d40c->log_num] = NULL;
14858d318a50SLinus Walleij 
14868d318a50SLinus Walleij 		/*
14878d318a50SLinus Walleij 		 * Check if there are more logical allocation
14888d318a50SLinus Walleij 		 * on this phy channel.
14898d318a50SLinus Walleij 		 */
14908d318a50SLinus Walleij 		if (!d40_alloc_mask_free(phy, is_src, event)) {
14918d318a50SLinus Walleij 			/* Resume the other logical channels if any */
14928d318a50SLinus Walleij 			if (d40_chan_has_events(d40c)) {
14938d318a50SLinus Walleij 				res = d40_channel_execute_command(d40c,
14948d318a50SLinus Walleij 								  D40_DMA_RUN);
14958d318a50SLinus Walleij 				if (res) {
14968d318a50SLinus Walleij 					dev_err(&d40c->chan.dev->device,
14978d318a50SLinus Walleij 						"[%s] Executing RUN command\n",
14988d318a50SLinus Walleij 						__func__);
14998d318a50SLinus Walleij 					return res;
15008d318a50SLinus Walleij 				}
15018d318a50SLinus Walleij 			}
15028d318a50SLinus Walleij 			return 0;
15038d318a50SLinus Walleij 		}
1504d181b3a8SJonas Aaberg 	} else {
1505d181b3a8SJonas Aaberg 		(void) d40_alloc_mask_free(phy, is_src, 0);
1506d181b3a8SJonas Aaberg 	}
15078d318a50SLinus Walleij 
15088d318a50SLinus Walleij 	/* Release physical channel */
15098d318a50SLinus Walleij 	res = d40_channel_execute_command(d40c, D40_DMA_STOP);
15108d318a50SLinus Walleij 	if (res) {
15118d318a50SLinus Walleij 		dev_err(&d40c->chan.dev->device,
15128d318a50SLinus Walleij 			"[%s] Failed to stop channel\n", __func__);
15138d318a50SLinus Walleij 		return res;
15148d318a50SLinus Walleij 	}
15158d318a50SLinus Walleij 	d40c->phy_chan = NULL;
15168d318a50SLinus Walleij 	/* Invalidate channel type */
15178d318a50SLinus Walleij 	d40c->dma_cfg.channel_type = 0;
15188d318a50SLinus Walleij 	d40c->base->lookup_phy_chans[phy->num] = NULL;
15198d318a50SLinus Walleij 
15208d318a50SLinus Walleij 	return 0;
15218d318a50SLinus Walleij }
15228d318a50SLinus Walleij 
1523a5ebca47SJonas Aaberg static bool d40_is_paused(struct d40_chan *d40c)
1524a5ebca47SJonas Aaberg {
1525a5ebca47SJonas Aaberg 	bool is_paused = false;
1526a5ebca47SJonas Aaberg 	unsigned long flags;
1527a5ebca47SJonas Aaberg 	void __iomem *active_reg;
1528a5ebca47SJonas Aaberg 	u32 status;
1529a5ebca47SJonas Aaberg 	u32 event;
1530a5ebca47SJonas Aaberg 
1531a5ebca47SJonas Aaberg 	spin_lock_irqsave(&d40c->lock, flags);
1532a5ebca47SJonas Aaberg 
1533a5ebca47SJonas Aaberg 	if (d40c->log_num == D40_PHY_CHAN) {
1534a5ebca47SJonas Aaberg 		if (d40c->phy_chan->num % 2 == 0)
1535a5ebca47SJonas Aaberg 			active_reg = d40c->base->virtbase + D40_DREG_ACTIVE;
1536a5ebca47SJonas Aaberg 		else
1537a5ebca47SJonas Aaberg 			active_reg = d40c->base->virtbase + D40_DREG_ACTIVO;
1538a5ebca47SJonas Aaberg 
1539a5ebca47SJonas Aaberg 		status = (readl(active_reg) &
1540a5ebca47SJonas Aaberg 			  D40_CHAN_POS_MASK(d40c->phy_chan->num)) >>
1541a5ebca47SJonas Aaberg 			D40_CHAN_POS(d40c->phy_chan->num);
1542a5ebca47SJonas Aaberg 		if (status == D40_DMA_SUSPENDED || status == D40_DMA_STOP)
1543a5ebca47SJonas Aaberg 			is_paused = true;
1544a5ebca47SJonas Aaberg 
1545a5ebca47SJonas Aaberg 		goto _exit;
1546a5ebca47SJonas Aaberg 	}
1547a5ebca47SJonas Aaberg 
1548a5ebca47SJonas Aaberg 	if (d40c->dma_cfg.dir == STEDMA40_MEM_TO_PERIPH ||
15499dbfbd35SJonas Aaberg 	    d40c->dma_cfg.dir == STEDMA40_MEM_TO_MEM) {
1550a5ebca47SJonas Aaberg 		event = D40_TYPE_TO_EVENT(d40c->dma_cfg.dst_dev_type);
15519dbfbd35SJonas Aaberg 		status = readl(d40c->base->virtbase + D40_DREG_PCBASE +
15529dbfbd35SJonas Aaberg 			       d40c->phy_chan->num * D40_DREG_PCDELTA +
15539dbfbd35SJonas Aaberg 			       D40_CHAN_REG_SDLNK);
15549dbfbd35SJonas Aaberg 	} else if (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_MEM) {
1555a5ebca47SJonas Aaberg 		event = D40_TYPE_TO_EVENT(d40c->dma_cfg.src_dev_type);
15569dbfbd35SJonas Aaberg 		status = readl(d40c->base->virtbase + D40_DREG_PCBASE +
15579dbfbd35SJonas Aaberg 			       d40c->phy_chan->num * D40_DREG_PCDELTA +
15589dbfbd35SJonas Aaberg 			       D40_CHAN_REG_SSLNK);
15599dbfbd35SJonas Aaberg 	} else {
1560a5ebca47SJonas Aaberg 		dev_err(&d40c->chan.dev->device,
1561a5ebca47SJonas Aaberg 			"[%s] Unknown direction\n", __func__);
1562a5ebca47SJonas Aaberg 		goto _exit;
1563a5ebca47SJonas Aaberg 	}
15649dbfbd35SJonas Aaberg 
1565a5ebca47SJonas Aaberg 	status = (status & D40_EVENTLINE_MASK(event)) >>
1566a5ebca47SJonas Aaberg 		D40_EVENTLINE_POS(event);
1567a5ebca47SJonas Aaberg 
1568a5ebca47SJonas Aaberg 	if (status != D40_DMA_RUN)
1569a5ebca47SJonas Aaberg 		is_paused = true;
1570a5ebca47SJonas Aaberg _exit:
1571a5ebca47SJonas Aaberg 	spin_unlock_irqrestore(&d40c->lock, flags);
1572a5ebca47SJonas Aaberg 	return is_paused;
1573a5ebca47SJonas Aaberg 
1574a5ebca47SJonas Aaberg }
1575a5ebca47SJonas Aaberg 
1576a5ebca47SJonas Aaberg 
15778d318a50SLinus Walleij static u32 stedma40_residue(struct dma_chan *chan)
15788d318a50SLinus Walleij {
15798d318a50SLinus Walleij 	struct d40_chan *d40c =
15808d318a50SLinus Walleij 		container_of(chan, struct d40_chan, chan);
15818d318a50SLinus Walleij 	u32 bytes_left;
15828d318a50SLinus Walleij 	unsigned long flags;
15838d318a50SLinus Walleij 
15848d318a50SLinus Walleij 	spin_lock_irqsave(&d40c->lock, flags);
15858d318a50SLinus Walleij 	bytes_left = d40_residue(d40c);
15868d318a50SLinus Walleij 	spin_unlock_irqrestore(&d40c->lock, flags);
15878d318a50SLinus Walleij 
15888d318a50SLinus Walleij 	return bytes_left;
15898d318a50SLinus Walleij }
15908d318a50SLinus Walleij 
15918d318a50SLinus Walleij /* Public DMA functions in addition to the DMA engine framework */
15928d318a50SLinus Walleij 
15938d318a50SLinus Walleij int stedma40_set_psize(struct dma_chan *chan,
15948d318a50SLinus Walleij 		       int src_psize,
15958d318a50SLinus Walleij 		       int dst_psize)
15968d318a50SLinus Walleij {
15978d318a50SLinus Walleij 	struct d40_chan *d40c =
15988d318a50SLinus Walleij 		container_of(chan, struct d40_chan, chan);
15998d318a50SLinus Walleij 	unsigned long flags;
16008d318a50SLinus Walleij 
16018d318a50SLinus Walleij 	spin_lock_irqsave(&d40c->lock, flags);
16028d318a50SLinus Walleij 
16038d318a50SLinus Walleij 	if (d40c->log_num != D40_PHY_CHAN) {
16048d318a50SLinus Walleij 		d40c->log_def.lcsp1 &= ~D40_MEM_LCSP1_SCFG_PSIZE_MASK;
16058d318a50SLinus Walleij 		d40c->log_def.lcsp3 &= ~D40_MEM_LCSP1_SCFG_PSIZE_MASK;
1606508849adSLinus Walleij 		d40c->log_def.lcsp1 |= src_psize <<
1607508849adSLinus Walleij 			D40_MEM_LCSP1_SCFG_PSIZE_POS;
1608508849adSLinus Walleij 		d40c->log_def.lcsp3 |= dst_psize <<
1609508849adSLinus Walleij 			D40_MEM_LCSP1_SCFG_PSIZE_POS;
16108d318a50SLinus Walleij 		goto out;
16118d318a50SLinus Walleij 	}
16128d318a50SLinus Walleij 
16138d318a50SLinus Walleij 	if (src_psize == STEDMA40_PSIZE_PHY_1)
16148d318a50SLinus Walleij 		d40c->src_def_cfg &= ~(1 << D40_SREG_CFG_PHY_PEN_POS);
16158d318a50SLinus Walleij 	else {
16168d318a50SLinus Walleij 		d40c->src_def_cfg |= 1 << D40_SREG_CFG_PHY_PEN_POS;
16178d318a50SLinus Walleij 		d40c->src_def_cfg &= ~(STEDMA40_PSIZE_PHY_16 <<
16188d318a50SLinus Walleij 				       D40_SREG_CFG_PSIZE_POS);
16198d318a50SLinus Walleij 		d40c->src_def_cfg |= src_psize << D40_SREG_CFG_PSIZE_POS;
16208d318a50SLinus Walleij 	}
16218d318a50SLinus Walleij 
16228d318a50SLinus Walleij 	if (dst_psize == STEDMA40_PSIZE_PHY_1)
16238d318a50SLinus Walleij 		d40c->dst_def_cfg &= ~(1 << D40_SREG_CFG_PHY_PEN_POS);
16248d318a50SLinus Walleij 	else {
16258d318a50SLinus Walleij 		d40c->dst_def_cfg |= 1 << D40_SREG_CFG_PHY_PEN_POS;
16268d318a50SLinus Walleij 		d40c->dst_def_cfg &= ~(STEDMA40_PSIZE_PHY_16 <<
16278d318a50SLinus Walleij 				       D40_SREG_CFG_PSIZE_POS);
16288d318a50SLinus Walleij 		d40c->dst_def_cfg |= dst_psize << D40_SREG_CFG_PSIZE_POS;
16298d318a50SLinus Walleij 	}
16308d318a50SLinus Walleij out:
16318d318a50SLinus Walleij 	spin_unlock_irqrestore(&d40c->lock, flags);
16328d318a50SLinus Walleij 	return 0;
16338d318a50SLinus Walleij }
16348d318a50SLinus Walleij EXPORT_SYMBOL(stedma40_set_psize);
16358d318a50SLinus Walleij 
16368d318a50SLinus Walleij struct dma_async_tx_descriptor *stedma40_memcpy_sg(struct dma_chan *chan,
16378d318a50SLinus Walleij 						   struct scatterlist *sgl_dst,
16388d318a50SLinus Walleij 						   struct scatterlist *sgl_src,
16398d318a50SLinus Walleij 						   unsigned int sgl_len,
16402a614340SJonas Aaberg 						   unsigned long dma_flags)
16418d318a50SLinus Walleij {
16428d318a50SLinus Walleij 	int res;
16438d318a50SLinus Walleij 	struct d40_desc *d40d;
16448d318a50SLinus Walleij 	struct d40_chan *d40c = container_of(chan, struct d40_chan,
16458d318a50SLinus Walleij 					     chan);
16462a614340SJonas Aaberg 	unsigned long flags;
16478d318a50SLinus Walleij 
16480d0f6b8bSJonas Aaberg 	if (d40c->phy_chan == NULL) {
16490d0f6b8bSJonas Aaberg 		dev_err(&d40c->chan.dev->device,
16500d0f6b8bSJonas Aaberg 			"[%s] Unallocated channel.\n", __func__);
16510d0f6b8bSJonas Aaberg 		return ERR_PTR(-EINVAL);
16520d0f6b8bSJonas Aaberg 	}
16530d0f6b8bSJonas Aaberg 
16542a614340SJonas Aaberg 	spin_lock_irqsave(&d40c->lock, flags);
16558d318a50SLinus Walleij 	d40d = d40_desc_get(d40c);
16568d318a50SLinus Walleij 
16578d318a50SLinus Walleij 	if (d40d == NULL)
16588d318a50SLinus Walleij 		goto err;
16598d318a50SLinus Walleij 
16608d318a50SLinus Walleij 	d40d->lli_len = sgl_len;
1661698e4732SJonas Aaberg 	d40d->lli_current = 0;
16622a614340SJonas Aaberg 	d40d->txd.flags = dma_flags;
16638d318a50SLinus Walleij 
16648d318a50SLinus Walleij 	if (d40c->log_num != D40_PHY_CHAN) {
16658d318a50SLinus Walleij 
16668d318a50SLinus Walleij 		if (d40_pool_lli_alloc(d40d, sgl_len, true) < 0) {
16678d318a50SLinus Walleij 			dev_err(&d40c->chan.dev->device,
16688d318a50SLinus Walleij 				"[%s] Out of memory\n", __func__);
16698d318a50SLinus Walleij 			goto err;
16708d318a50SLinus Walleij 		}
16718d318a50SLinus Walleij 
1672698e4732SJonas Aaberg 		(void) d40_log_sg_to_lli(sgl_src,
16738d318a50SLinus Walleij 					 sgl_len,
16748d318a50SLinus Walleij 					 d40d->lli_log.src,
16758d318a50SLinus Walleij 					 d40c->log_def.lcsp1,
1676698e4732SJonas Aaberg 					 d40c->dma_cfg.src_info.data_width);
16778d318a50SLinus Walleij 
1678698e4732SJonas Aaberg 		(void) d40_log_sg_to_lli(sgl_dst,
16798d318a50SLinus Walleij 					 sgl_len,
16808d318a50SLinus Walleij 					 d40d->lli_log.dst,
16818d318a50SLinus Walleij 					 d40c->log_def.lcsp3,
1682698e4732SJonas Aaberg 					 d40c->dma_cfg.dst_info.data_width);
16838d318a50SLinus Walleij 	} else {
16848d318a50SLinus Walleij 		if (d40_pool_lli_alloc(d40d, sgl_len, false) < 0) {
16858d318a50SLinus Walleij 			dev_err(&d40c->chan.dev->device,
16868d318a50SLinus Walleij 				"[%s] Out of memory\n", __func__);
16878d318a50SLinus Walleij 			goto err;
16888d318a50SLinus Walleij 		}
16898d318a50SLinus Walleij 
16908d318a50SLinus Walleij 		res = d40_phy_sg_to_lli(sgl_src,
16918d318a50SLinus Walleij 					sgl_len,
16928d318a50SLinus Walleij 					0,
16938d318a50SLinus Walleij 					d40d->lli_phy.src,
1694aa182ae2SJonas Aaberg 					virt_to_phys(d40d->lli_phy.src),
16958d318a50SLinus Walleij 					d40c->src_def_cfg,
16968d318a50SLinus Walleij 					d40c->dma_cfg.src_info.data_width,
16970246e77bSJonas Aaberg 					d40c->dma_cfg.src_info.psize);
16988d318a50SLinus Walleij 
16998d318a50SLinus Walleij 		if (res < 0)
17008d318a50SLinus Walleij 			goto err;
17018d318a50SLinus Walleij 
17028d318a50SLinus Walleij 		res = d40_phy_sg_to_lli(sgl_dst,
17038d318a50SLinus Walleij 					sgl_len,
17048d318a50SLinus Walleij 					0,
17058d318a50SLinus Walleij 					d40d->lli_phy.dst,
1706aa182ae2SJonas Aaberg 					virt_to_phys(d40d->lli_phy.dst),
17078d318a50SLinus Walleij 					d40c->dst_def_cfg,
17088d318a50SLinus Walleij 					d40c->dma_cfg.dst_info.data_width,
17090246e77bSJonas Aaberg 					d40c->dma_cfg.dst_info.psize);
17108d318a50SLinus Walleij 
17118d318a50SLinus Walleij 		if (res < 0)
17128d318a50SLinus Walleij 			goto err;
17138d318a50SLinus Walleij 
17148d318a50SLinus Walleij 		(void) dma_map_single(d40c->base->dev, d40d->lli_phy.src,
17158d318a50SLinus Walleij 				      d40d->lli_pool.size, DMA_TO_DEVICE);
17168d318a50SLinus Walleij 	}
17178d318a50SLinus Walleij 
17188d318a50SLinus Walleij 	dma_async_tx_descriptor_init(&d40d->txd, chan);
17198d318a50SLinus Walleij 
17208d318a50SLinus Walleij 	d40d->txd.tx_submit = d40_tx_submit;
17218d318a50SLinus Walleij 
17222a614340SJonas Aaberg 	spin_unlock_irqrestore(&d40c->lock, flags);
17238d318a50SLinus Walleij 
17248d318a50SLinus Walleij 	return &d40d->txd;
17258d318a50SLinus Walleij err:
17262a614340SJonas Aaberg 	spin_unlock_irqrestore(&d40c->lock, flags);
17278d318a50SLinus Walleij 	return NULL;
17288d318a50SLinus Walleij }
17298d318a50SLinus Walleij EXPORT_SYMBOL(stedma40_memcpy_sg);
17308d318a50SLinus Walleij 
17318d318a50SLinus Walleij bool stedma40_filter(struct dma_chan *chan, void *data)
17328d318a50SLinus Walleij {
17338d318a50SLinus Walleij 	struct stedma40_chan_cfg *info = data;
17348d318a50SLinus Walleij 	struct d40_chan *d40c =
17358d318a50SLinus Walleij 		container_of(chan, struct d40_chan, chan);
17368d318a50SLinus Walleij 	int err;
17378d318a50SLinus Walleij 
17388d318a50SLinus Walleij 	if (data) {
17398d318a50SLinus Walleij 		err = d40_validate_conf(d40c, info);
17408d318a50SLinus Walleij 		if (!err)
17418d318a50SLinus Walleij 			d40c->dma_cfg = *info;
17428d318a50SLinus Walleij 	} else
17438d318a50SLinus Walleij 		err = d40_config_memcpy(d40c);
17448d318a50SLinus Walleij 
17458d318a50SLinus Walleij 	return err == 0;
17468d318a50SLinus Walleij }
17478d318a50SLinus Walleij EXPORT_SYMBOL(stedma40_filter);
17488d318a50SLinus Walleij 
17498d318a50SLinus Walleij /* DMA ENGINE functions */
17508d318a50SLinus Walleij static int d40_alloc_chan_resources(struct dma_chan *chan)
17518d318a50SLinus Walleij {
17528d318a50SLinus Walleij 	int err;
17538d318a50SLinus Walleij 	unsigned long flags;
17548d318a50SLinus Walleij 	struct d40_chan *d40c =
17558d318a50SLinus Walleij 		container_of(chan, struct d40_chan, chan);
1756ef1872ecSLinus Walleij 	bool is_free_phy;
17578d318a50SLinus Walleij 	spin_lock_irqsave(&d40c->lock, flags);
17588d318a50SLinus Walleij 
17598d318a50SLinus Walleij 	d40c->completed = chan->cookie = 1;
17608d318a50SLinus Walleij 
17618d318a50SLinus Walleij 	/*
17628d318a50SLinus Walleij 	 * If no dma configuration is set (channel_type == 0)
1763ef1872ecSLinus Walleij 	 * use default configuration (memcpy)
17648d318a50SLinus Walleij 	 */
17658d318a50SLinus Walleij 	if (d40c->dma_cfg.channel_type == 0) {
1766aa182ae2SJonas Aaberg 
17678d318a50SLinus Walleij 		err = d40_config_memcpy(d40c);
1768ff0b12baSJonas Aaberg 		if (err) {
1769ff0b12baSJonas Aaberg 			dev_err(&d40c->chan.dev->device,
1770ff0b12baSJonas Aaberg 				"[%s] Failed to configure memcpy channel\n",
1771ff0b12baSJonas Aaberg 				__func__);
1772ff0b12baSJonas Aaberg 			goto fail;
1773ff0b12baSJonas Aaberg 		}
17748d318a50SLinus Walleij 	}
1775ef1872ecSLinus Walleij 	is_free_phy = (d40c->phy_chan == NULL);
17768d318a50SLinus Walleij 
17778d318a50SLinus Walleij 	err = d40_allocate_channel(d40c);
17788d318a50SLinus Walleij 	if (err) {
17798d318a50SLinus Walleij 		dev_err(&d40c->chan.dev->device,
17808d318a50SLinus Walleij 			"[%s] Failed to allocate channel\n", __func__);
1781ff0b12baSJonas Aaberg 		goto fail;
17828d318a50SLinus Walleij 	}
17838d318a50SLinus Walleij 
1784ef1872ecSLinus Walleij 	/* Fill in basic CFG register values */
1785ef1872ecSLinus Walleij 	d40_phy_cfg(&d40c->dma_cfg, &d40c->src_def_cfg,
1786ef1872ecSLinus Walleij 		    &d40c->dst_def_cfg, d40c->log_num != D40_PHY_CHAN);
1787ef1872ecSLinus Walleij 
1788ef1872ecSLinus Walleij 	if (d40c->log_num != D40_PHY_CHAN) {
1789ef1872ecSLinus Walleij 		d40_log_cfg(&d40c->dma_cfg,
1790ef1872ecSLinus Walleij 			    &d40c->log_def.lcsp1, &d40c->log_def.lcsp3);
1791ef1872ecSLinus Walleij 
1792ef1872ecSLinus Walleij 		if (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_MEM)
1793ef1872ecSLinus Walleij 			d40c->lcpa = d40c->base->lcpa_base +
1794ef1872ecSLinus Walleij 			  d40c->dma_cfg.src_dev_type * D40_LCPA_CHAN_SIZE;
1795ef1872ecSLinus Walleij 		else
1796ef1872ecSLinus Walleij 			d40c->lcpa = d40c->base->lcpa_base +
1797ef1872ecSLinus Walleij 			  d40c->dma_cfg.dst_dev_type *
1798ef1872ecSLinus Walleij 			  D40_LCPA_CHAN_SIZE + D40_LCPA_CHAN_DST_DELTA;
1799ef1872ecSLinus Walleij 	}
1800ef1872ecSLinus Walleij 
1801ef1872ecSLinus Walleij 	/*
1802ef1872ecSLinus Walleij 	 * Only write channel configuration to the DMA if the physical
1803ef1872ecSLinus Walleij 	 * resource is free. In case of multiple logical channels
1804ef1872ecSLinus Walleij 	 * on the same physical resource, only the first write is necessary.
1805ef1872ecSLinus Walleij 	 */
1806b55912c6SJonas Aaberg 	if (is_free_phy)
1807b55912c6SJonas Aaberg 		d40_config_write(d40c);
1808ff0b12baSJonas Aaberg fail:
18098d318a50SLinus Walleij 	spin_unlock_irqrestore(&d40c->lock, flags);
1810ff0b12baSJonas Aaberg 	return err;
18118d318a50SLinus Walleij }
18128d318a50SLinus Walleij 
18138d318a50SLinus Walleij static void d40_free_chan_resources(struct dma_chan *chan)
18148d318a50SLinus Walleij {
18158d318a50SLinus Walleij 	struct d40_chan *d40c =
18168d318a50SLinus Walleij 		container_of(chan, struct d40_chan, chan);
18178d318a50SLinus Walleij 	int err;
18188d318a50SLinus Walleij 	unsigned long flags;
18198d318a50SLinus Walleij 
18200d0f6b8bSJonas Aaberg 	if (d40c->phy_chan == NULL) {
18210d0f6b8bSJonas Aaberg 		dev_err(&d40c->chan.dev->device,
18220d0f6b8bSJonas Aaberg 			"[%s] Cannot free unallocated channel\n", __func__);
18230d0f6b8bSJonas Aaberg 		return;
18240d0f6b8bSJonas Aaberg 	}
18250d0f6b8bSJonas Aaberg 
18260d0f6b8bSJonas Aaberg 
18278d318a50SLinus Walleij 	spin_lock_irqsave(&d40c->lock, flags);
18288d318a50SLinus Walleij 
18298d318a50SLinus Walleij 	err = d40_free_dma(d40c);
18308d318a50SLinus Walleij 
18318d318a50SLinus Walleij 	if (err)
18328d318a50SLinus Walleij 		dev_err(&d40c->chan.dev->device,
18338d318a50SLinus Walleij 			"[%s] Failed to free channel\n", __func__);
18348d318a50SLinus Walleij 	spin_unlock_irqrestore(&d40c->lock, flags);
18358d318a50SLinus Walleij }
18368d318a50SLinus Walleij 
/*
 * dmaengine prep callback for memcpy transfers.
 *
 * Grabs a free descriptor and builds a single src/dst LLI pair for it,
 * using the logical-channel layout when the channel is logical
 * (log_num != D40_PHY_CHAN) and the physical layout otherwise.
 * Returns the async descriptor, ERR_PTR(-EINVAL) for an unallocated
 * channel, or NULL on allocation/fill failure.
 */
static struct dma_async_tx_descriptor *d40_prep_memcpy(struct dma_chan *chan,
						       dma_addr_t dst,
						       dma_addr_t src,
						       size_t size,
						       unsigned long dma_flags)
{
	struct d40_desc *d40d;
	struct d40_chan *d40c = container_of(chan, struct d40_chan,
					     chan);
	unsigned long flags;
	int err = 0;

	if (d40c->phy_chan == NULL) {
		dev_err(&d40c->chan.dev->device,
			"[%s] Channel is not allocated.\n", __func__);
		return ERR_PTR(-EINVAL);
	}

	/* Descriptor lists and LLI pools are protected by the channel lock. */
	spin_lock_irqsave(&d40c->lock, flags);
	d40d = d40_desc_get(d40c);

	if (d40d == NULL) {
		dev_err(&d40c->chan.dev->device,
			"[%s] Descriptor is NULL\n", __func__);
		goto err;
	}

	d40d->txd.flags = dma_flags;

	dma_async_tx_descriptor_init(&d40d->txd, chan);

	d40d->txd.tx_submit = d40_tx_submit;

	if (d40c->log_num != D40_PHY_CHAN) {
		/* Logical channel: one log LLI each for source and dest. */

		if (d40_pool_lli_alloc(d40d, 1, true) < 0) {
			dev_err(&d40c->chan.dev->device,
				"[%s] Out of memory\n", __func__);
			goto err;
		}
		d40d->lli_len = 1;
		d40d->lli_current = 0;

		d40_log_fill_lli(d40d->lli_log.src,
				 src,
				 size,
				 d40c->log_def.lcsp1,
				 d40c->dma_cfg.src_info.data_width,
				 true);

		d40_log_fill_lli(d40d->lli_log.dst,
				 dst,
				 size,
				 d40c->log_def.lcsp3,
				 d40c->dma_cfg.dst_info.data_width,
				 true);

	} else {
		/* Physical channel: build phy LLIs and flush them for HW. */

		if (d40_pool_lli_alloc(d40d, 1, false) < 0) {
			dev_err(&d40c->chan.dev->device,
				"[%s] Out of memory\n", __func__);
			goto err;
		}

		err = d40_phy_fill_lli(d40d->lli_phy.src,
				       src,
				       size,
				       d40c->dma_cfg.src_info.psize,
				       0,
				       d40c->src_def_cfg,
				       true,
				       d40c->dma_cfg.src_info.data_width,
				       false);
		if (err)
			goto err_fill_lli;

		err = d40_phy_fill_lli(d40d->lli_phy.dst,
				       dst,
				       size,
				       d40c->dma_cfg.dst_info.psize,
				       0,
				       d40c->dst_def_cfg,
				       true,
				       d40c->dma_cfg.dst_info.data_width,
				       false);

		if (err)
			goto err_fill_lli;

		/* Make the LLI table visible to the device. */
		(void) dma_map_single(d40c->base->dev, d40d->lli_phy.src,
				      d40d->lli_pool.size, DMA_TO_DEVICE);
	}

	spin_unlock_irqrestore(&d40c->lock, flags);
	return &d40d->txd;

err_fill_lli:
	dev_err(&d40c->chan.dev->device,
		"[%s] Failed filling in PHY LLI\n", __func__);
	d40_pool_lli_free(d40d);
err:
	/*
	 * NOTE(review): on these error paths the descriptor obtained from
	 * d40_desc_get() does not appear to be put back on a free list --
	 * possible descriptor leak; confirm against d40_desc_get/free.
	 */
	spin_unlock_irqrestore(&d40c->lock, flags);
	return NULL;
}
19428d318a50SLinus Walleij 
19438d318a50SLinus Walleij static int d40_prep_slave_sg_log(struct d40_desc *d40d,
19448d318a50SLinus Walleij 				 struct d40_chan *d40c,
19458d318a50SLinus Walleij 				 struct scatterlist *sgl,
19468d318a50SLinus Walleij 				 unsigned int sg_len,
19478d318a50SLinus Walleij 				 enum dma_data_direction direction,
19482a614340SJonas Aaberg 				 unsigned long dma_flags)
19498d318a50SLinus Walleij {
19508d318a50SLinus Walleij 	dma_addr_t dev_addr = 0;
19518d318a50SLinus Walleij 	int total_size;
19528d318a50SLinus Walleij 
19538d318a50SLinus Walleij 	if (d40_pool_lli_alloc(d40d, sg_len, true) < 0) {
19548d318a50SLinus Walleij 		dev_err(&d40c->chan.dev->device,
19558d318a50SLinus Walleij 			"[%s] Out of memory\n", __func__);
19568d318a50SLinus Walleij 		return -ENOMEM;
19578d318a50SLinus Walleij 	}
19588d318a50SLinus Walleij 
19598d318a50SLinus Walleij 	d40d->lli_len = sg_len;
1960698e4732SJonas Aaberg 	d40d->lli_current = 0;
19618d318a50SLinus Walleij 
19622a614340SJonas Aaberg 	if (direction == DMA_FROM_DEVICE)
196395e1400fSLinus Walleij 		if (d40c->runtime_addr)
196495e1400fSLinus Walleij 			dev_addr = d40c->runtime_addr;
196595e1400fSLinus Walleij 		else
19668d318a50SLinus Walleij 			dev_addr = d40c->base->plat_data->dev_rx[d40c->dma_cfg.src_dev_type];
19672a614340SJonas Aaberg 	else if (direction == DMA_TO_DEVICE)
196895e1400fSLinus Walleij 		if (d40c->runtime_addr)
196995e1400fSLinus Walleij 			dev_addr = d40c->runtime_addr;
197095e1400fSLinus Walleij 		else
19718d318a50SLinus Walleij 			dev_addr = d40c->base->plat_data->dev_tx[d40c->dma_cfg.dst_dev_type];
197295e1400fSLinus Walleij 
19732a614340SJonas Aaberg 	else
19742a614340SJonas Aaberg 		return -EINVAL;
19752a614340SJonas Aaberg 
1976698e4732SJonas Aaberg 	total_size = d40_log_sg_to_dev(sgl, sg_len,
19778d318a50SLinus Walleij 				       &d40d->lli_log,
19788d318a50SLinus Walleij 				       &d40c->log_def,
19798d318a50SLinus Walleij 				       d40c->dma_cfg.src_info.data_width,
19808d318a50SLinus Walleij 				       d40c->dma_cfg.dst_info.data_width,
19818d318a50SLinus Walleij 				       direction,
1982698e4732SJonas Aaberg 				       dev_addr);
19832a614340SJonas Aaberg 
19848d318a50SLinus Walleij 	if (total_size < 0)
19858d318a50SLinus Walleij 		return -EINVAL;
19868d318a50SLinus Walleij 
19878d318a50SLinus Walleij 	return 0;
19888d318a50SLinus Walleij }
19898d318a50SLinus Walleij 
19908d318a50SLinus Walleij static int d40_prep_slave_sg_phy(struct d40_desc *d40d,
19918d318a50SLinus Walleij 				 struct d40_chan *d40c,
19928d318a50SLinus Walleij 				 struct scatterlist *sgl,
19938d318a50SLinus Walleij 				 unsigned int sgl_len,
19948d318a50SLinus Walleij 				 enum dma_data_direction direction,
19952a614340SJonas Aaberg 				 unsigned long dma_flags)
19968d318a50SLinus Walleij {
19978d318a50SLinus Walleij 	dma_addr_t src_dev_addr;
19988d318a50SLinus Walleij 	dma_addr_t dst_dev_addr;
19998d318a50SLinus Walleij 	int res;
20008d318a50SLinus Walleij 
20018d318a50SLinus Walleij 	if (d40_pool_lli_alloc(d40d, sgl_len, false) < 0) {
20028d318a50SLinus Walleij 		dev_err(&d40c->chan.dev->device,
20038d318a50SLinus Walleij 			"[%s] Out of memory\n", __func__);
20048d318a50SLinus Walleij 		return -ENOMEM;
20058d318a50SLinus Walleij 	}
20068d318a50SLinus Walleij 
20078d318a50SLinus Walleij 	d40d->lli_len = sgl_len;
2008698e4732SJonas Aaberg 	d40d->lli_current = 0;
20098d318a50SLinus Walleij 
20108d318a50SLinus Walleij 	if (direction == DMA_FROM_DEVICE) {
20118d318a50SLinus Walleij 		dst_dev_addr = 0;
201295e1400fSLinus Walleij 		if (d40c->runtime_addr)
201395e1400fSLinus Walleij 			src_dev_addr = d40c->runtime_addr;
201495e1400fSLinus Walleij 		else
20158d318a50SLinus Walleij 			src_dev_addr = d40c->base->plat_data->dev_rx[d40c->dma_cfg.src_dev_type];
20168d318a50SLinus Walleij 	} else if (direction == DMA_TO_DEVICE) {
201795e1400fSLinus Walleij 		if (d40c->runtime_addr)
201895e1400fSLinus Walleij 			dst_dev_addr = d40c->runtime_addr;
201995e1400fSLinus Walleij 		else
20208d318a50SLinus Walleij 			dst_dev_addr = d40c->base->plat_data->dev_tx[d40c->dma_cfg.dst_dev_type];
20218d318a50SLinus Walleij 		src_dev_addr = 0;
20228d318a50SLinus Walleij 	} else
20238d318a50SLinus Walleij 		return -EINVAL;
20248d318a50SLinus Walleij 
20258d318a50SLinus Walleij 	res = d40_phy_sg_to_lli(sgl,
20268d318a50SLinus Walleij 				sgl_len,
20278d318a50SLinus Walleij 				src_dev_addr,
20288d318a50SLinus Walleij 				d40d->lli_phy.src,
2029aa182ae2SJonas Aaberg 				virt_to_phys(d40d->lli_phy.src),
20308d318a50SLinus Walleij 				d40c->src_def_cfg,
20318d318a50SLinus Walleij 				d40c->dma_cfg.src_info.data_width,
20320246e77bSJonas Aaberg 				d40c->dma_cfg.src_info.psize);
20338d318a50SLinus Walleij 	if (res < 0)
20348d318a50SLinus Walleij 		return res;
20358d318a50SLinus Walleij 
20368d318a50SLinus Walleij 	res = d40_phy_sg_to_lli(sgl,
20378d318a50SLinus Walleij 				sgl_len,
20388d318a50SLinus Walleij 				dst_dev_addr,
20398d318a50SLinus Walleij 				d40d->lli_phy.dst,
2040aa182ae2SJonas Aaberg 				virt_to_phys(d40d->lli_phy.dst),
20418d318a50SLinus Walleij 				d40c->dst_def_cfg,
20428d318a50SLinus Walleij 				d40c->dma_cfg.dst_info.data_width,
20430246e77bSJonas Aaberg 				d40c->dma_cfg.dst_info.psize);
20448d318a50SLinus Walleij 	if (res < 0)
20458d318a50SLinus Walleij 		return res;
20468d318a50SLinus Walleij 
20478d318a50SLinus Walleij 	(void) dma_map_single(d40c->base->dev, d40d->lli_phy.src,
20488d318a50SLinus Walleij 			      d40d->lli_pool.size, DMA_TO_DEVICE);
20498d318a50SLinus Walleij 	return 0;
20508d318a50SLinus Walleij }
20518d318a50SLinus Walleij 
/*
 * dmaengine prep callback for slave (device) scatter/gather transfers.
 *
 * Grabs a free descriptor and dispatches to the logical- or
 * physical-channel LLI builder depending on the channel type.
 * Returns the async descriptor, ERR_PTR(-EINVAL) for an unallocated
 * channel, or NULL on failure.
 */
static struct dma_async_tx_descriptor *d40_prep_slave_sg(struct dma_chan *chan,
							 struct scatterlist *sgl,
							 unsigned int sg_len,
							 enum dma_data_direction direction,
							 unsigned long dma_flags)
{
	struct d40_desc *d40d;
	struct d40_chan *d40c = container_of(chan, struct d40_chan,
					     chan);
	unsigned long flags;
	int err;

	if (d40c->phy_chan == NULL) {
		dev_err(&d40c->chan.dev->device,
			"[%s] Cannot prepare unallocated channel\n", __func__);
		return ERR_PTR(-EINVAL);
	}

	/* Optional platform-supplied hook, run before each prepared job. */
	if (d40c->dma_cfg.pre_transfer)
		d40c->dma_cfg.pre_transfer(chan,
					   d40c->dma_cfg.pre_transfer_data,
					   sg_dma_len(sgl));

	/* The descriptor free list is protected by the channel lock. */
	spin_lock_irqsave(&d40c->lock, flags);
	d40d = d40_desc_get(d40c);
	spin_unlock_irqrestore(&d40c->lock, flags);

	if (d40d == NULL)
		return NULL;

	if (d40c->log_num != D40_PHY_CHAN)
		err = d40_prep_slave_sg_log(d40d, d40c, sgl, sg_len,
					    direction, dma_flags);
	else
		err = d40_prep_slave_sg_phy(d40d, d40c, sgl, sg_len,
					    direction, dma_flags);
	if (err) {
		dev_err(&d40c->chan.dev->device,
			"[%s] Failed to prepare %s slave sg job: %d\n",
			__func__,
			d40c->log_num != D40_PHY_CHAN ? "log" : "phy", err);
		/*
		 * NOTE(review): d40d obtained from d40_desc_get() is not
		 * returned to the free list on this path -- possible
		 * descriptor leak; confirm against d40_desc_get/free.
		 */
		return NULL;
	}

	d40d->txd.flags = dma_flags;

	dma_async_tx_descriptor_init(&d40d->txd, chan);

	d40d->txd.tx_submit = d40_tx_submit;

	return &d40d->txd;
}
21048d318a50SLinus Walleij 
21058d318a50SLinus Walleij static enum dma_status d40_tx_status(struct dma_chan *chan,
21068d318a50SLinus Walleij 				     dma_cookie_t cookie,
21078d318a50SLinus Walleij 				     struct dma_tx_state *txstate)
21088d318a50SLinus Walleij {
21098d318a50SLinus Walleij 	struct d40_chan *d40c = container_of(chan, struct d40_chan, chan);
21108d318a50SLinus Walleij 	dma_cookie_t last_used;
21118d318a50SLinus Walleij 	dma_cookie_t last_complete;
21128d318a50SLinus Walleij 	int ret;
21138d318a50SLinus Walleij 
21140d0f6b8bSJonas Aaberg 	if (d40c->phy_chan == NULL) {
21150d0f6b8bSJonas Aaberg 		dev_err(&d40c->chan.dev->device,
21160d0f6b8bSJonas Aaberg 			"[%s] Cannot read status of unallocated channel\n",
21170d0f6b8bSJonas Aaberg 			__func__);
21180d0f6b8bSJonas Aaberg 		return -EINVAL;
21190d0f6b8bSJonas Aaberg 	}
21200d0f6b8bSJonas Aaberg 
21218d318a50SLinus Walleij 	last_complete = d40c->completed;
21228d318a50SLinus Walleij 	last_used = chan->cookie;
21238d318a50SLinus Walleij 
2124a5ebca47SJonas Aaberg 	if (d40_is_paused(d40c))
2125a5ebca47SJonas Aaberg 		ret = DMA_PAUSED;
2126a5ebca47SJonas Aaberg 	else
21278d318a50SLinus Walleij 		ret = dma_async_is_complete(cookie, last_complete, last_used);
21288d318a50SLinus Walleij 
2129a5ebca47SJonas Aaberg 	dma_set_tx_state(txstate, last_complete, last_used,
2130a5ebca47SJonas Aaberg 			 stedma40_residue(chan));
21318d318a50SLinus Walleij 
21328d318a50SLinus Walleij 	return ret;
21338d318a50SLinus Walleij }
21348d318a50SLinus Walleij 
21358d318a50SLinus Walleij static void d40_issue_pending(struct dma_chan *chan)
21368d318a50SLinus Walleij {
21378d318a50SLinus Walleij 	struct d40_chan *d40c = container_of(chan, struct d40_chan, chan);
21388d318a50SLinus Walleij 	unsigned long flags;
21398d318a50SLinus Walleij 
21400d0f6b8bSJonas Aaberg 	if (d40c->phy_chan == NULL) {
21410d0f6b8bSJonas Aaberg 		dev_err(&d40c->chan.dev->device,
21420d0f6b8bSJonas Aaberg 			"[%s] Channel is not allocated!\n", __func__);
21430d0f6b8bSJonas Aaberg 		return;
21440d0f6b8bSJonas Aaberg 	}
21450d0f6b8bSJonas Aaberg 
21468d318a50SLinus Walleij 	spin_lock_irqsave(&d40c->lock, flags);
21478d318a50SLinus Walleij 
21488d318a50SLinus Walleij 	/* Busy means that pending jobs are already being processed */
21498d318a50SLinus Walleij 	if (!d40c->busy)
21508d318a50SLinus Walleij 		(void) d40_queue_start(d40c);
21518d318a50SLinus Walleij 
21528d318a50SLinus Walleij 	spin_unlock_irqrestore(&d40c->lock, flags);
21538d318a50SLinus Walleij }
21548d318a50SLinus Walleij 
215595e1400fSLinus Walleij /* Runtime reconfiguration extension */
215695e1400fSLinus Walleij static void d40_set_runtime_config(struct dma_chan *chan,
215795e1400fSLinus Walleij 			       struct dma_slave_config *config)
215895e1400fSLinus Walleij {
215995e1400fSLinus Walleij 	struct d40_chan *d40c = container_of(chan, struct d40_chan, chan);
216095e1400fSLinus Walleij 	struct stedma40_chan_cfg *cfg = &d40c->dma_cfg;
216195e1400fSLinus Walleij 	enum dma_slave_buswidth config_addr_width;
216295e1400fSLinus Walleij 	dma_addr_t config_addr;
216395e1400fSLinus Walleij 	u32 config_maxburst;
216495e1400fSLinus Walleij 	enum stedma40_periph_data_width addr_width;
216595e1400fSLinus Walleij 	int psize;
216695e1400fSLinus Walleij 
216795e1400fSLinus Walleij 	if (config->direction == DMA_FROM_DEVICE) {
216895e1400fSLinus Walleij 		dma_addr_t dev_addr_rx =
216995e1400fSLinus Walleij 			d40c->base->plat_data->dev_rx[cfg->src_dev_type];
217095e1400fSLinus Walleij 
217195e1400fSLinus Walleij 		config_addr = config->src_addr;
217295e1400fSLinus Walleij 		if (dev_addr_rx)
217395e1400fSLinus Walleij 			dev_dbg(d40c->base->dev,
217495e1400fSLinus Walleij 				"channel has a pre-wired RX address %08x "
217595e1400fSLinus Walleij 				"overriding with %08x\n",
217695e1400fSLinus Walleij 				dev_addr_rx, config_addr);
217795e1400fSLinus Walleij 		if (cfg->dir != STEDMA40_PERIPH_TO_MEM)
217895e1400fSLinus Walleij 			dev_dbg(d40c->base->dev,
217995e1400fSLinus Walleij 				"channel was not configured for peripheral "
218095e1400fSLinus Walleij 				"to memory transfer (%d) overriding\n",
218195e1400fSLinus Walleij 				cfg->dir);
218295e1400fSLinus Walleij 		cfg->dir = STEDMA40_PERIPH_TO_MEM;
218395e1400fSLinus Walleij 
218495e1400fSLinus Walleij 		config_addr_width = config->src_addr_width;
218595e1400fSLinus Walleij 		config_maxburst = config->src_maxburst;
218695e1400fSLinus Walleij 
218795e1400fSLinus Walleij 	} else if (config->direction == DMA_TO_DEVICE) {
218895e1400fSLinus Walleij 		dma_addr_t dev_addr_tx =
218995e1400fSLinus Walleij 			d40c->base->plat_data->dev_tx[cfg->dst_dev_type];
219095e1400fSLinus Walleij 
219195e1400fSLinus Walleij 		config_addr = config->dst_addr;
219295e1400fSLinus Walleij 		if (dev_addr_tx)
219395e1400fSLinus Walleij 			dev_dbg(d40c->base->dev,
219495e1400fSLinus Walleij 				"channel has a pre-wired TX address %08x "
219595e1400fSLinus Walleij 				"overriding with %08x\n",
219695e1400fSLinus Walleij 				dev_addr_tx, config_addr);
219795e1400fSLinus Walleij 		if (cfg->dir != STEDMA40_MEM_TO_PERIPH)
219895e1400fSLinus Walleij 			dev_dbg(d40c->base->dev,
219995e1400fSLinus Walleij 				"channel was not configured for memory "
220095e1400fSLinus Walleij 				"to peripheral transfer (%d) overriding\n",
220195e1400fSLinus Walleij 				cfg->dir);
220295e1400fSLinus Walleij 		cfg->dir = STEDMA40_MEM_TO_PERIPH;
220395e1400fSLinus Walleij 
220495e1400fSLinus Walleij 		config_addr_width = config->dst_addr_width;
220595e1400fSLinus Walleij 		config_maxburst = config->dst_maxburst;
220695e1400fSLinus Walleij 
220795e1400fSLinus Walleij 	} else {
220895e1400fSLinus Walleij 		dev_err(d40c->base->dev,
220995e1400fSLinus Walleij 			"unrecognized channel direction %d\n",
221095e1400fSLinus Walleij 			config->direction);
221195e1400fSLinus Walleij 		return;
221295e1400fSLinus Walleij 	}
221395e1400fSLinus Walleij 
221495e1400fSLinus Walleij 	switch (config_addr_width) {
221595e1400fSLinus Walleij 	case DMA_SLAVE_BUSWIDTH_1_BYTE:
221695e1400fSLinus Walleij 		addr_width = STEDMA40_BYTE_WIDTH;
221795e1400fSLinus Walleij 		break;
221895e1400fSLinus Walleij 	case DMA_SLAVE_BUSWIDTH_2_BYTES:
221995e1400fSLinus Walleij 		addr_width = STEDMA40_HALFWORD_WIDTH;
222095e1400fSLinus Walleij 		break;
222195e1400fSLinus Walleij 	case DMA_SLAVE_BUSWIDTH_4_BYTES:
222295e1400fSLinus Walleij 		addr_width = STEDMA40_WORD_WIDTH;
222395e1400fSLinus Walleij 		break;
222495e1400fSLinus Walleij 	case DMA_SLAVE_BUSWIDTH_8_BYTES:
222595e1400fSLinus Walleij 		addr_width = STEDMA40_DOUBLEWORD_WIDTH;
222695e1400fSLinus Walleij 		break;
222795e1400fSLinus Walleij 	default:
222895e1400fSLinus Walleij 		dev_err(d40c->base->dev,
222995e1400fSLinus Walleij 			"illegal peripheral address width "
223095e1400fSLinus Walleij 			"requested (%d)\n",
223195e1400fSLinus Walleij 			config->src_addr_width);
223295e1400fSLinus Walleij 		return;
223395e1400fSLinus Walleij 	}
223495e1400fSLinus Walleij 
223595e1400fSLinus Walleij 	if (config_maxburst >= 16)
223695e1400fSLinus Walleij 		psize = STEDMA40_PSIZE_LOG_16;
223795e1400fSLinus Walleij 	else if (config_maxburst >= 8)
223895e1400fSLinus Walleij 		psize = STEDMA40_PSIZE_LOG_8;
223995e1400fSLinus Walleij 	else if (config_maxburst >= 4)
224095e1400fSLinus Walleij 		psize = STEDMA40_PSIZE_LOG_4;
224195e1400fSLinus Walleij 	else
224295e1400fSLinus Walleij 		psize = STEDMA40_PSIZE_LOG_1;
224395e1400fSLinus Walleij 
224495e1400fSLinus Walleij 	/* Set up all the endpoint configs */
224595e1400fSLinus Walleij 	cfg->src_info.data_width = addr_width;
224695e1400fSLinus Walleij 	cfg->src_info.psize = psize;
224795e1400fSLinus Walleij 	cfg->src_info.endianess = STEDMA40_LITTLE_ENDIAN;
224895e1400fSLinus Walleij 	cfg->src_info.flow_ctrl = STEDMA40_NO_FLOW_CTRL;
224995e1400fSLinus Walleij 	cfg->dst_info.data_width = addr_width;
225095e1400fSLinus Walleij 	cfg->dst_info.psize = psize;
225195e1400fSLinus Walleij 	cfg->dst_info.endianess = STEDMA40_LITTLE_ENDIAN;
225295e1400fSLinus Walleij 	cfg->dst_info.flow_ctrl = STEDMA40_NO_FLOW_CTRL;
225395e1400fSLinus Walleij 
225495e1400fSLinus Walleij 	/* These settings will take precedence later */
225595e1400fSLinus Walleij 	d40c->runtime_addr = config_addr;
225695e1400fSLinus Walleij 	d40c->runtime_direction = config->direction;
225795e1400fSLinus Walleij 	dev_dbg(d40c->base->dev,
225895e1400fSLinus Walleij 		"configured channel %s for %s, data width %d, "
225995e1400fSLinus Walleij 		"maxburst %d bytes, LE, no flow control\n",
226095e1400fSLinus Walleij 		dma_chan_name(chan),
226195e1400fSLinus Walleij 		(config->direction == DMA_FROM_DEVICE) ? "RX" : "TX",
226295e1400fSLinus Walleij 		config_addr_width,
226395e1400fSLinus Walleij 		config_maxburst);
226495e1400fSLinus Walleij }
226595e1400fSLinus Walleij 
226605827630SLinus Walleij static int d40_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
226705827630SLinus Walleij 		       unsigned long arg)
22688d318a50SLinus Walleij {
22698d318a50SLinus Walleij 	unsigned long flags;
22708d318a50SLinus Walleij 	struct d40_chan *d40c = container_of(chan, struct d40_chan, chan);
22718d318a50SLinus Walleij 
22720d0f6b8bSJonas Aaberg 	if (d40c->phy_chan == NULL) {
22730d0f6b8bSJonas Aaberg 		dev_err(&d40c->chan.dev->device,
22740d0f6b8bSJonas Aaberg 			"[%s] Channel is not allocated!\n", __func__);
22750d0f6b8bSJonas Aaberg 		return -EINVAL;
22760d0f6b8bSJonas Aaberg 	}
22770d0f6b8bSJonas Aaberg 
22788d318a50SLinus Walleij 	switch (cmd) {
22798d318a50SLinus Walleij 	case DMA_TERMINATE_ALL:
22808d318a50SLinus Walleij 		spin_lock_irqsave(&d40c->lock, flags);
22818d318a50SLinus Walleij 		d40_term_all(d40c);
22828d318a50SLinus Walleij 		spin_unlock_irqrestore(&d40c->lock, flags);
22838d318a50SLinus Walleij 		return 0;
22848d318a50SLinus Walleij 	case DMA_PAUSE:
22858d318a50SLinus Walleij 		return d40_pause(chan);
22868d318a50SLinus Walleij 	case DMA_RESUME:
22878d318a50SLinus Walleij 		return d40_resume(chan);
228895e1400fSLinus Walleij 	case DMA_SLAVE_CONFIG:
228995e1400fSLinus Walleij 		d40_set_runtime_config(chan,
229095e1400fSLinus Walleij 			(struct dma_slave_config *) arg);
229195e1400fSLinus Walleij 		return 0;
229295e1400fSLinus Walleij 	default:
229395e1400fSLinus Walleij 		break;
22948d318a50SLinus Walleij 	}
22958d318a50SLinus Walleij 
22968d318a50SLinus Walleij 	/* Other commands are unimplemented */
22978d318a50SLinus Walleij 	return -ENXIO;
22988d318a50SLinus Walleij }
22998d318a50SLinus Walleij 
23008d318a50SLinus Walleij /* Initialization functions */
23018d318a50SLinus Walleij 
23028d318a50SLinus Walleij static void __init d40_chan_init(struct d40_base *base, struct dma_device *dma,
23038d318a50SLinus Walleij 				 struct d40_chan *chans, int offset,
23048d318a50SLinus Walleij 				 int num_chans)
23058d318a50SLinus Walleij {
23068d318a50SLinus Walleij 	int i = 0;
23078d318a50SLinus Walleij 	struct d40_chan *d40c;
23088d318a50SLinus Walleij 
23098d318a50SLinus Walleij 	INIT_LIST_HEAD(&dma->channels);
23108d318a50SLinus Walleij 
23118d318a50SLinus Walleij 	for (i = offset; i < offset + num_chans; i++) {
23128d318a50SLinus Walleij 		d40c = &chans[i];
23138d318a50SLinus Walleij 		d40c->base = base;
23148d318a50SLinus Walleij 		d40c->chan.device = dma;
23158d318a50SLinus Walleij 
23168d318a50SLinus Walleij 		spin_lock_init(&d40c->lock);
23178d318a50SLinus Walleij 
23188d318a50SLinus Walleij 		d40c->log_num = D40_PHY_CHAN;
23198d318a50SLinus Walleij 
23208d318a50SLinus Walleij 		INIT_LIST_HEAD(&d40c->active);
23218d318a50SLinus Walleij 		INIT_LIST_HEAD(&d40c->queue);
23228d318a50SLinus Walleij 		INIT_LIST_HEAD(&d40c->client);
23238d318a50SLinus Walleij 
23248d318a50SLinus Walleij 		tasklet_init(&d40c->tasklet, dma_tasklet,
23258d318a50SLinus Walleij 			     (unsigned long) d40c);
23268d318a50SLinus Walleij 
23278d318a50SLinus Walleij 		list_add_tail(&d40c->chan.device_node,
23288d318a50SLinus Walleij 			      &dma->channels);
23298d318a50SLinus Walleij 	}
23308d318a50SLinus Walleij }
23318d318a50SLinus Walleij 
23328d318a50SLinus Walleij static int __init d40_dmaengine_init(struct d40_base *base,
23338d318a50SLinus Walleij 				     int num_reserved_chans)
23348d318a50SLinus Walleij {
23358d318a50SLinus Walleij 	int err ;
23368d318a50SLinus Walleij 
23378d318a50SLinus Walleij 	d40_chan_init(base, &base->dma_slave, base->log_chans,
23388d318a50SLinus Walleij 		      0, base->num_log_chans);
23398d318a50SLinus Walleij 
23408d318a50SLinus Walleij 	dma_cap_zero(base->dma_slave.cap_mask);
23418d318a50SLinus Walleij 	dma_cap_set(DMA_SLAVE, base->dma_slave.cap_mask);
23428d318a50SLinus Walleij 
23438d318a50SLinus Walleij 	base->dma_slave.device_alloc_chan_resources = d40_alloc_chan_resources;
23448d318a50SLinus Walleij 	base->dma_slave.device_free_chan_resources = d40_free_chan_resources;
23458d318a50SLinus Walleij 	base->dma_slave.device_prep_dma_memcpy = d40_prep_memcpy;
23468d318a50SLinus Walleij 	base->dma_slave.device_prep_slave_sg = d40_prep_slave_sg;
23478d318a50SLinus Walleij 	base->dma_slave.device_tx_status = d40_tx_status;
23488d318a50SLinus Walleij 	base->dma_slave.device_issue_pending = d40_issue_pending;
23498d318a50SLinus Walleij 	base->dma_slave.device_control = d40_control;
23508d318a50SLinus Walleij 	base->dma_slave.dev = base->dev;
23518d318a50SLinus Walleij 
23528d318a50SLinus Walleij 	err = dma_async_device_register(&base->dma_slave);
23538d318a50SLinus Walleij 
23548d318a50SLinus Walleij 	if (err) {
23558d318a50SLinus Walleij 		dev_err(base->dev,
23568d318a50SLinus Walleij 			"[%s] Failed to register slave channels\n",
23578d318a50SLinus Walleij 			__func__);
23588d318a50SLinus Walleij 		goto failure1;
23598d318a50SLinus Walleij 	}
23608d318a50SLinus Walleij 
23618d318a50SLinus Walleij 	d40_chan_init(base, &base->dma_memcpy, base->log_chans,
23628d318a50SLinus Walleij 		      base->num_log_chans, base->plat_data->memcpy_len);
23638d318a50SLinus Walleij 
23648d318a50SLinus Walleij 	dma_cap_zero(base->dma_memcpy.cap_mask);
23658d318a50SLinus Walleij 	dma_cap_set(DMA_MEMCPY, base->dma_memcpy.cap_mask);
23668d318a50SLinus Walleij 
23678d318a50SLinus Walleij 	base->dma_memcpy.device_alloc_chan_resources = d40_alloc_chan_resources;
23688d318a50SLinus Walleij 	base->dma_memcpy.device_free_chan_resources = d40_free_chan_resources;
23698d318a50SLinus Walleij 	base->dma_memcpy.device_prep_dma_memcpy = d40_prep_memcpy;
23708d318a50SLinus Walleij 	base->dma_memcpy.device_prep_slave_sg = d40_prep_slave_sg;
23718d318a50SLinus Walleij 	base->dma_memcpy.device_tx_status = d40_tx_status;
23728d318a50SLinus Walleij 	base->dma_memcpy.device_issue_pending = d40_issue_pending;
23738d318a50SLinus Walleij 	base->dma_memcpy.device_control = d40_control;
23748d318a50SLinus Walleij 	base->dma_memcpy.dev = base->dev;
23758d318a50SLinus Walleij 	/*
23768d318a50SLinus Walleij 	 * This controller can only access address at even
23778d318a50SLinus Walleij 	 * 32bit boundaries, i.e. 2^2
23788d318a50SLinus Walleij 	 */
23798d318a50SLinus Walleij 	base->dma_memcpy.copy_align = 2;
23808d318a50SLinus Walleij 
23818d318a50SLinus Walleij 	err = dma_async_device_register(&base->dma_memcpy);
23828d318a50SLinus Walleij 
23838d318a50SLinus Walleij 	if (err) {
23848d318a50SLinus Walleij 		dev_err(base->dev,
23858d318a50SLinus Walleij 			"[%s] Failed to regsiter memcpy only channels\n",
23868d318a50SLinus Walleij 			__func__);
23878d318a50SLinus Walleij 		goto failure2;
23888d318a50SLinus Walleij 	}
23898d318a50SLinus Walleij 
23908d318a50SLinus Walleij 	d40_chan_init(base, &base->dma_both, base->phy_chans,
23918d318a50SLinus Walleij 		      0, num_reserved_chans);
23928d318a50SLinus Walleij 
23938d318a50SLinus Walleij 	dma_cap_zero(base->dma_both.cap_mask);
23948d318a50SLinus Walleij 	dma_cap_set(DMA_SLAVE, base->dma_both.cap_mask);
23958d318a50SLinus Walleij 	dma_cap_set(DMA_MEMCPY, base->dma_both.cap_mask);
23968d318a50SLinus Walleij 
23978d318a50SLinus Walleij 	base->dma_both.device_alloc_chan_resources = d40_alloc_chan_resources;
23988d318a50SLinus Walleij 	base->dma_both.device_free_chan_resources = d40_free_chan_resources;
23998d318a50SLinus Walleij 	base->dma_both.device_prep_dma_memcpy = d40_prep_memcpy;
24008d318a50SLinus Walleij 	base->dma_both.device_prep_slave_sg = d40_prep_slave_sg;
24018d318a50SLinus Walleij 	base->dma_both.device_tx_status = d40_tx_status;
24028d318a50SLinus Walleij 	base->dma_both.device_issue_pending = d40_issue_pending;
24038d318a50SLinus Walleij 	base->dma_both.device_control = d40_control;
24048d318a50SLinus Walleij 	base->dma_both.dev = base->dev;
24058d318a50SLinus Walleij 	base->dma_both.copy_align = 2;
24068d318a50SLinus Walleij 	err = dma_async_device_register(&base->dma_both);
24078d318a50SLinus Walleij 
24088d318a50SLinus Walleij 	if (err) {
24098d318a50SLinus Walleij 		dev_err(base->dev,
24108d318a50SLinus Walleij 			"[%s] Failed to register logical and physical capable channels\n",
24118d318a50SLinus Walleij 			__func__);
24128d318a50SLinus Walleij 		goto failure3;
24138d318a50SLinus Walleij 	}
24148d318a50SLinus Walleij 	return 0;
24158d318a50SLinus Walleij failure3:
24168d318a50SLinus Walleij 	dma_async_device_unregister(&base->dma_memcpy);
24178d318a50SLinus Walleij failure2:
24188d318a50SLinus Walleij 	dma_async_device_unregister(&base->dma_slave);
24198d318a50SLinus Walleij failure1:
24208d318a50SLinus Walleij 	return err;
24218d318a50SLinus Walleij }
24228d318a50SLinus Walleij 
24238d318a50SLinus Walleij /* Initialization functions. */
24248d318a50SLinus Walleij 
/*
 * Scan and initialize the physical channel resources.
 *
 * PRSME/PRSMO hold a 2-bit mode field per channel (even channels in
 * PRSME, odd in PRSMO); channels whose field reads 1 are security-only
 * and are marked permanently allocated, as are any channels listed in
 * the platform's disabled_channels[] (terminated by -1).  Finally the
 * PRTYP register is checked per channel.  Returns the number of
 * physical channels available to this driver.
 */
static int __init d40_phy_res_init(struct d40_base *base)
{
	int i;
	int num_phy_chans_avail = 0;
	u32 val[2];
	int odd_even_bit = -2;

	/* val[0]: even channels, val[1]: odd channels. */
	val[0] = readl(base->virtbase + D40_DREG_PRSME);
	val[1] = readl(base->virtbase + D40_DREG_PRSMO);

	for (i = 0; i < base->num_phy_chans; i++) {
		base->phy_res[i].num = i;
		/* Advance the 2-bit field position once per even/odd pair. */
		odd_even_bit += 2 * ((i % 2) == 0);
		if (((val[i % 2] >> odd_even_bit) & 3) == 1) {
			/* Mark security only channels as occupied */
			base->phy_res[i].allocated_src = D40_ALLOC_PHY;
			base->phy_res[i].allocated_dst = D40_ALLOC_PHY;
		} else {
			base->phy_res[i].allocated_src = D40_ALLOC_FREE;
			base->phy_res[i].allocated_dst = D40_ALLOC_FREE;
			num_phy_chans_avail++;
		}
		spin_lock_init(&base->phy_res[i].lock);
	}

	/* Mark disabled channels as occupied */
	for (i = 0; base->plat_data->disabled_channels[i] != -1; i++) {
		int chan = base->plat_data->disabled_channels[i];

		base->phy_res[chan].allocated_src = D40_ALLOC_PHY;
		base->phy_res[chan].allocated_dst = D40_ALLOC_PHY;
		num_phy_chans_avail--;
	}

	dev_info(base->dev, "%d of %d physical DMA channels available\n",
		 num_phy_chans_avail, base->num_phy_chans);

	/* Verify settings extended vs standard */
	val[0] = readl(base->virtbase + D40_DREG_PRTYP);

	/* PRTYP also holds 2 bits per channel; 1 is the expected value. */
	for (i = 0; i < base->num_phy_chans; i++) {

		if (base->phy_res[i].allocated_src == D40_ALLOC_FREE &&
		    (val[0] & 0x3) != 1)
			dev_info(base->dev,
				 "[%s] INFO: channel %d is misconfigured (%d)\n",
				 __func__, i, val[0] & 0x3);

		val[0] = val[0] >> 2;
	}

	return num_phy_chans_avail;
}
24788d318a50SLinus Walleij 
24798d318a50SLinus Walleij static struct d40_base * __init d40_hw_detect_init(struct platform_device *pdev)
24808d318a50SLinus Walleij {
24818d318a50SLinus Walleij 	static const struct d40_reg_val dma_id_regs[] = {
24828d318a50SLinus Walleij 		/* Peripheral Id */
24838d318a50SLinus Walleij 		{ .reg = D40_DREG_PERIPHID0, .val = 0x0040},
24848d318a50SLinus Walleij 		{ .reg = D40_DREG_PERIPHID1, .val = 0x0000},
24858d318a50SLinus Walleij 		/*
24868d318a50SLinus Walleij 		 * D40_DREG_PERIPHID2 Depends on HW revision:
24878d318a50SLinus Walleij 		 *  MOP500/HREF ED has 0x0008,
24888d318a50SLinus Walleij 		 *  ? has 0x0018,
24898d318a50SLinus Walleij 		 *  HREF V1 has 0x0028
24908d318a50SLinus Walleij 		 */
24918d318a50SLinus Walleij 		{ .reg = D40_DREG_PERIPHID3, .val = 0x0000},
24928d318a50SLinus Walleij 
24938d318a50SLinus Walleij 		/* PCell Id */
24948d318a50SLinus Walleij 		{ .reg = D40_DREG_CELLID0, .val = 0x000d},
24958d318a50SLinus Walleij 		{ .reg = D40_DREG_CELLID1, .val = 0x00f0},
24968d318a50SLinus Walleij 		{ .reg = D40_DREG_CELLID2, .val = 0x0005},
24978d318a50SLinus Walleij 		{ .reg = D40_DREG_CELLID3, .val = 0x00b1}
24988d318a50SLinus Walleij 	};
24998d318a50SLinus Walleij 	struct stedma40_platform_data *plat_data;
25008d318a50SLinus Walleij 	struct clk *clk = NULL;
25018d318a50SLinus Walleij 	void __iomem *virtbase = NULL;
25028d318a50SLinus Walleij 	struct resource *res = NULL;
25038d318a50SLinus Walleij 	struct d40_base *base = NULL;
25048d318a50SLinus Walleij 	int num_log_chans = 0;
25058d318a50SLinus Walleij 	int num_phy_chans;
25068d318a50SLinus Walleij 	int i;
2507f4185592SLinus Walleij 	u32 val;
25083ae0267fSJonas Aaberg 	u32 rev;
25098d318a50SLinus Walleij 
25108d318a50SLinus Walleij 	clk = clk_get(&pdev->dev, NULL);
25118d318a50SLinus Walleij 
25128d318a50SLinus Walleij 	if (IS_ERR(clk)) {
25138d318a50SLinus Walleij 		dev_err(&pdev->dev, "[%s] No matching clock found\n",
25148d318a50SLinus Walleij 			__func__);
25158d318a50SLinus Walleij 		goto failure;
25168d318a50SLinus Walleij 	}
25178d318a50SLinus Walleij 
25188d318a50SLinus Walleij 	clk_enable(clk);
25198d318a50SLinus Walleij 
25208d318a50SLinus Walleij 	/* Get IO for DMAC base address */
25218d318a50SLinus Walleij 	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "base");
25228d318a50SLinus Walleij 	if (!res)
25238d318a50SLinus Walleij 		goto failure;
25248d318a50SLinus Walleij 
25258d318a50SLinus Walleij 	if (request_mem_region(res->start, resource_size(res),
25268d318a50SLinus Walleij 			       D40_NAME " I/O base") == NULL)
25278d318a50SLinus Walleij 		goto failure;
25288d318a50SLinus Walleij 
25298d318a50SLinus Walleij 	virtbase = ioremap(res->start, resource_size(res));
25308d318a50SLinus Walleij 	if (!virtbase)
25318d318a50SLinus Walleij 		goto failure;
25328d318a50SLinus Walleij 
25338d318a50SLinus Walleij 	/* HW version check */
25348d318a50SLinus Walleij 	for (i = 0; i < ARRAY_SIZE(dma_id_regs); i++) {
25358d318a50SLinus Walleij 		if (dma_id_regs[i].val !=
25368d318a50SLinus Walleij 		    readl(virtbase + dma_id_regs[i].reg)) {
25378d318a50SLinus Walleij 			dev_err(&pdev->dev,
25388d318a50SLinus Walleij 				"[%s] Unknown hardware! Expected 0x%x at 0x%x but got 0x%x\n",
25398d318a50SLinus Walleij 				__func__,
25408d318a50SLinus Walleij 				dma_id_regs[i].val,
25418d318a50SLinus Walleij 				dma_id_regs[i].reg,
25428d318a50SLinus Walleij 				readl(virtbase + dma_id_regs[i].reg));
25438d318a50SLinus Walleij 			goto failure;
25448d318a50SLinus Walleij 		}
25458d318a50SLinus Walleij 	}
25468d318a50SLinus Walleij 
25473ae0267fSJonas Aaberg 	/* Get silicon revision and designer */
2548f4185592SLinus Walleij 	val = readl(virtbase + D40_DREG_PERIPHID2);
25498d318a50SLinus Walleij 
25503ae0267fSJonas Aaberg 	if ((val & D40_DREG_PERIPHID2_DESIGNER_MASK) !=
25513ae0267fSJonas Aaberg 	    D40_HW_DESIGNER) {
25528d318a50SLinus Walleij 		dev_err(&pdev->dev,
25538d318a50SLinus Walleij 			"[%s] Unknown designer! Got %x wanted %x\n",
25543ae0267fSJonas Aaberg 			__func__, val & D40_DREG_PERIPHID2_DESIGNER_MASK,
25553ae0267fSJonas Aaberg 			D40_HW_DESIGNER);
25568d318a50SLinus Walleij 		goto failure;
25578d318a50SLinus Walleij 	}
25588d318a50SLinus Walleij 
25593ae0267fSJonas Aaberg 	rev = (val & D40_DREG_PERIPHID2_REV_MASK) >>
25603ae0267fSJonas Aaberg 		D40_DREG_PERIPHID2_REV_POS;
25613ae0267fSJonas Aaberg 
25628d318a50SLinus Walleij 	/* The number of physical channels on this HW */
25638d318a50SLinus Walleij 	num_phy_chans = 4 * (readl(virtbase + D40_DREG_ICFG) & 0x7) + 4;
25648d318a50SLinus Walleij 
25658d318a50SLinus Walleij 	dev_info(&pdev->dev, "hardware revision: %d @ 0x%x\n",
25663ae0267fSJonas Aaberg 		 rev, res->start);
25678d318a50SLinus Walleij 
25688d318a50SLinus Walleij 	plat_data = pdev->dev.platform_data;
25698d318a50SLinus Walleij 
25708d318a50SLinus Walleij 	/* Count the number of logical channels in use */
25718d318a50SLinus Walleij 	for (i = 0; i < plat_data->dev_len; i++)
25728d318a50SLinus Walleij 		if (plat_data->dev_rx[i] != 0)
25738d318a50SLinus Walleij 			num_log_chans++;
25748d318a50SLinus Walleij 
25758d318a50SLinus Walleij 	for (i = 0; i < plat_data->dev_len; i++)
25768d318a50SLinus Walleij 		if (plat_data->dev_tx[i] != 0)
25778d318a50SLinus Walleij 			num_log_chans++;
25788d318a50SLinus Walleij 
25798d318a50SLinus Walleij 	base = kzalloc(ALIGN(sizeof(struct d40_base), 4) +
25808d318a50SLinus Walleij 		       (num_phy_chans + num_log_chans + plat_data->memcpy_len) *
25818d318a50SLinus Walleij 		       sizeof(struct d40_chan), GFP_KERNEL);
25828d318a50SLinus Walleij 
25838d318a50SLinus Walleij 	if (base == NULL) {
25848d318a50SLinus Walleij 		dev_err(&pdev->dev, "[%s] Out of memory\n", __func__);
25858d318a50SLinus Walleij 		goto failure;
25868d318a50SLinus Walleij 	}
25878d318a50SLinus Walleij 
25883ae0267fSJonas Aaberg 	base->rev = rev;
25898d318a50SLinus Walleij 	base->clk = clk;
25908d318a50SLinus Walleij 	base->num_phy_chans = num_phy_chans;
25918d318a50SLinus Walleij 	base->num_log_chans = num_log_chans;
25928d318a50SLinus Walleij 	base->phy_start = res->start;
25938d318a50SLinus Walleij 	base->phy_size = resource_size(res);
25948d318a50SLinus Walleij 	base->virtbase = virtbase;
25958d318a50SLinus Walleij 	base->plat_data = plat_data;
25968d318a50SLinus Walleij 	base->dev = &pdev->dev;
25978d318a50SLinus Walleij 	base->phy_chans = ((void *)base) + ALIGN(sizeof(struct d40_base), 4);
25988d318a50SLinus Walleij 	base->log_chans = &base->phy_chans[num_phy_chans];
25998d318a50SLinus Walleij 
26008d318a50SLinus Walleij 	base->phy_res = kzalloc(num_phy_chans * sizeof(struct d40_phy_res),
26018d318a50SLinus Walleij 				GFP_KERNEL);
26028d318a50SLinus Walleij 	if (!base->phy_res)
26038d318a50SLinus Walleij 		goto failure;
26048d318a50SLinus Walleij 
26058d318a50SLinus Walleij 	base->lookup_phy_chans = kzalloc(num_phy_chans *
26068d318a50SLinus Walleij 					 sizeof(struct d40_chan *),
26078d318a50SLinus Walleij 					 GFP_KERNEL);
26088d318a50SLinus Walleij 	if (!base->lookup_phy_chans)
26098d318a50SLinus Walleij 		goto failure;
26108d318a50SLinus Walleij 
26118d318a50SLinus Walleij 	if (num_log_chans + plat_data->memcpy_len) {
26128d318a50SLinus Walleij 		/*
26138d318a50SLinus Walleij 		 * The max number of logical channels are event lines for all
26148d318a50SLinus Walleij 		 * src devices and dst devices
26158d318a50SLinus Walleij 		 */
26168d318a50SLinus Walleij 		base->lookup_log_chans = kzalloc(plat_data->dev_len * 2 *
26178d318a50SLinus Walleij 						 sizeof(struct d40_chan *),
26188d318a50SLinus Walleij 						 GFP_KERNEL);
26198d318a50SLinus Walleij 		if (!base->lookup_log_chans)
26208d318a50SLinus Walleij 			goto failure;
26218d318a50SLinus Walleij 	}
2622698e4732SJonas Aaberg 
2623698e4732SJonas Aaberg 	base->lcla_pool.alloc_map = kzalloc(num_phy_chans *
2624698e4732SJonas Aaberg 					    sizeof(struct d40_desc *) *
2625698e4732SJonas Aaberg 					    D40_LCLA_LINK_PER_EVENT_GRP,
26268d318a50SLinus Walleij 					    GFP_KERNEL);
26278d318a50SLinus Walleij 	if (!base->lcla_pool.alloc_map)
26288d318a50SLinus Walleij 		goto failure;
26298d318a50SLinus Walleij 
2630c675b1b4SJonas Aaberg 	base->desc_slab = kmem_cache_create(D40_NAME, sizeof(struct d40_desc),
2631c675b1b4SJonas Aaberg 					    0, SLAB_HWCACHE_ALIGN,
2632c675b1b4SJonas Aaberg 					    NULL);
2633c675b1b4SJonas Aaberg 	if (base->desc_slab == NULL)
2634c675b1b4SJonas Aaberg 		goto failure;
2635c675b1b4SJonas Aaberg 
26368d318a50SLinus Walleij 	return base;
26378d318a50SLinus Walleij 
26388d318a50SLinus Walleij failure:
2639*c6134c96SRabin Vincent 	if (!IS_ERR(clk)) {
26408d318a50SLinus Walleij 		clk_disable(clk);
26418d318a50SLinus Walleij 		clk_put(clk);
26428d318a50SLinus Walleij 	}
26438d318a50SLinus Walleij 	if (virtbase)
26448d318a50SLinus Walleij 		iounmap(virtbase);
26458d318a50SLinus Walleij 	if (res)
26468d318a50SLinus Walleij 		release_mem_region(res->start,
26478d318a50SLinus Walleij 				   resource_size(res));
26488d318a50SLinus Walleij 	if (virtbase)
26498d318a50SLinus Walleij 		iounmap(virtbase);
26508d318a50SLinus Walleij 
26518d318a50SLinus Walleij 	if (base) {
26528d318a50SLinus Walleij 		kfree(base->lcla_pool.alloc_map);
26538d318a50SLinus Walleij 		kfree(base->lookup_log_chans);
26548d318a50SLinus Walleij 		kfree(base->lookup_phy_chans);
26558d318a50SLinus Walleij 		kfree(base->phy_res);
26568d318a50SLinus Walleij 		kfree(base);
26578d318a50SLinus Walleij 	}
26588d318a50SLinus Walleij 
26598d318a50SLinus Walleij 	return NULL;
26608d318a50SLinus Walleij }
26618d318a50SLinus Walleij 
26628d318a50SLinus Walleij static void __init d40_hw_init(struct d40_base *base)
26638d318a50SLinus Walleij {
26648d318a50SLinus Walleij 
26658d318a50SLinus Walleij 	static const struct d40_reg_val dma_init_reg[] = {
26668d318a50SLinus Walleij 		/* Clock every part of the DMA block from start */
26678d318a50SLinus Walleij 		{ .reg = D40_DREG_GCC,    .val = 0x0000ff01},
26688d318a50SLinus Walleij 
26698d318a50SLinus Walleij 		/* Interrupts on all logical channels */
26708d318a50SLinus Walleij 		{ .reg = D40_DREG_LCMIS0, .val = 0xFFFFFFFF},
26718d318a50SLinus Walleij 		{ .reg = D40_DREG_LCMIS1, .val = 0xFFFFFFFF},
26728d318a50SLinus Walleij 		{ .reg = D40_DREG_LCMIS2, .val = 0xFFFFFFFF},
26738d318a50SLinus Walleij 		{ .reg = D40_DREG_LCMIS3, .val = 0xFFFFFFFF},
26748d318a50SLinus Walleij 		{ .reg = D40_DREG_LCICR0, .val = 0xFFFFFFFF},
26758d318a50SLinus Walleij 		{ .reg = D40_DREG_LCICR1, .val = 0xFFFFFFFF},
26768d318a50SLinus Walleij 		{ .reg = D40_DREG_LCICR2, .val = 0xFFFFFFFF},
26778d318a50SLinus Walleij 		{ .reg = D40_DREG_LCICR3, .val = 0xFFFFFFFF},
26788d318a50SLinus Walleij 		{ .reg = D40_DREG_LCTIS0, .val = 0xFFFFFFFF},
26798d318a50SLinus Walleij 		{ .reg = D40_DREG_LCTIS1, .val = 0xFFFFFFFF},
26808d318a50SLinus Walleij 		{ .reg = D40_DREG_LCTIS2, .val = 0xFFFFFFFF},
26818d318a50SLinus Walleij 		{ .reg = D40_DREG_LCTIS3, .val = 0xFFFFFFFF}
26828d318a50SLinus Walleij 	};
26838d318a50SLinus Walleij 	int i;
26848d318a50SLinus Walleij 	u32 prmseo[2] = {0, 0};
26858d318a50SLinus Walleij 	u32 activeo[2] = {0xFFFFFFFF, 0xFFFFFFFF};
26868d318a50SLinus Walleij 	u32 pcmis = 0;
26878d318a50SLinus Walleij 	u32 pcicr = 0;
26888d318a50SLinus Walleij 
26898d318a50SLinus Walleij 	for (i = 0; i < ARRAY_SIZE(dma_init_reg); i++)
26908d318a50SLinus Walleij 		writel(dma_init_reg[i].val,
26918d318a50SLinus Walleij 		       base->virtbase + dma_init_reg[i].reg);
26928d318a50SLinus Walleij 
26938d318a50SLinus Walleij 	/* Configure all our dma channels to default settings */
26948d318a50SLinus Walleij 	for (i = 0; i < base->num_phy_chans; i++) {
26958d318a50SLinus Walleij 
26968d318a50SLinus Walleij 		activeo[i % 2] = activeo[i % 2] << 2;
26978d318a50SLinus Walleij 
26988d318a50SLinus Walleij 		if (base->phy_res[base->num_phy_chans - i - 1].allocated_src
26998d318a50SLinus Walleij 		    == D40_ALLOC_PHY) {
27008d318a50SLinus Walleij 			activeo[i % 2] |= 3;
27018d318a50SLinus Walleij 			continue;
27028d318a50SLinus Walleij 		}
27038d318a50SLinus Walleij 
27048d318a50SLinus Walleij 		/* Enable interrupt # */
27058d318a50SLinus Walleij 		pcmis = (pcmis << 1) | 1;
27068d318a50SLinus Walleij 
27078d318a50SLinus Walleij 		/* Clear interrupt # */
27088d318a50SLinus Walleij 		pcicr = (pcicr << 1) | 1;
27098d318a50SLinus Walleij 
27108d318a50SLinus Walleij 		/* Set channel to physical mode */
27118d318a50SLinus Walleij 		prmseo[i % 2] = prmseo[i % 2] << 2;
27128d318a50SLinus Walleij 		prmseo[i % 2] |= 1;
27138d318a50SLinus Walleij 
27148d318a50SLinus Walleij 	}
27158d318a50SLinus Walleij 
27168d318a50SLinus Walleij 	writel(prmseo[1], base->virtbase + D40_DREG_PRMSE);
27178d318a50SLinus Walleij 	writel(prmseo[0], base->virtbase + D40_DREG_PRMSO);
27188d318a50SLinus Walleij 	writel(activeo[1], base->virtbase + D40_DREG_ACTIVE);
27198d318a50SLinus Walleij 	writel(activeo[0], base->virtbase + D40_DREG_ACTIVO);
27208d318a50SLinus Walleij 
27218d318a50SLinus Walleij 	/* Write which interrupt to enable */
27228d318a50SLinus Walleij 	writel(pcmis, base->virtbase + D40_DREG_PCMIS);
27238d318a50SLinus Walleij 
27248d318a50SLinus Walleij 	/* Write which interrupt to clear */
27258d318a50SLinus Walleij 	writel(pcicr, base->virtbase + D40_DREG_PCICR);
27268d318a50SLinus Walleij 
27278d318a50SLinus Walleij }
27288d318a50SLinus Walleij 
/*
 * Allocate the LCLA (logical channel link address) area, which the
 * hardware requires to be 256 KiB (18-bit) aligned.  Returns 0 on
 * success, -ENOMEM on allocation failure.  On success the physical
 * address of the area is programmed into D40_DREG_LCLA.
 */
static int __init d40_lcla_allocate(struct d40_base *base)
{
	unsigned long *page_list;
	int i, j;
	int ret = 0;

	/*
	 * This is somewhat ugly. We need 8192 bytes that are 18 bit aligned,
	 * To full fill this hardware requirement without wasting 256 kb
	 * we allocate pages until we get an aligned one.
	 */
	page_list = kmalloc(sizeof(unsigned long) * MAX_LCLA_ALLOC_ATTEMPTS,
			    GFP_KERNEL);

	if (!page_list) {
		ret = -ENOMEM;
		goto failure;
	}

	/* Calculating how many pages that are required */
	/*
	 * NOTE(review): 'pages' is computed as a page *count* (1 KiB per
	 * physical channel / PAGE_SIZE) but is passed to __get_free_pages()
	 * and free_pages() as an allocation *order* (2^order pages) — this
	 * over-allocates whenever the count exceeds 1; verify intent.
	 */
	base->lcla_pool.pages = SZ_1K * base->num_phy_chans / PAGE_SIZE;

	for (i = 0; i < MAX_LCLA_ALLOC_ATTEMPTS; i++) {
		page_list[i] = __get_free_pages(GFP_KERNEL,
						base->lcla_pool.pages);
		if (!page_list[i]) {

			dev_err(base->dev,
				"[%s] Failed to allocate %d pages.\n",
				__func__, base->lcla_pool.pages);

			/* Release every earlier (misaligned) attempt */
			for (j = 0; j < i; j++)
				free_pages(page_list[j], base->lcla_pool.pages);
			goto failure;
		}

		/* Stop as soon as an attempt lands on a 256 KiB boundary */
		if ((virt_to_phys((void *)page_list[i]) &
		     (LCLA_ALIGNMENT - 1)) == 0)
			break;
	}

	/* Free all the attempts before the aligned one (kept at index i) */
	for (j = 0; j < i; j++)
		free_pages(page_list[j], base->lcla_pool.pages);

	if (i < MAX_LCLA_ALLOC_ATTEMPTS) {
		base->lcla_pool.base = (void *)page_list[i];
	} else {
		/*
		 * After many attempts and no succees with finding the correct
		 * alignment, try with allocating a big buffer.
		 */
		dev_warn(base->dev,
			 "[%s] Failed to get %d pages @ 18 bit align.\n",
			 __func__, base->lcla_pool.pages);
		base->lcla_pool.base_unaligned = kmalloc(SZ_1K *
							 base->num_phy_chans +
							 LCLA_ALIGNMENT,
							 GFP_KERNEL);
		if (!base->lcla_pool.base_unaligned) {
			ret = -ENOMEM;
			goto failure;
		}

		/* Align within the oversized buffer; base_unaligned is
		 * what must eventually be kfree'd. */
		base->lcla_pool.base = PTR_ALIGN(base->lcla_pool.base_unaligned,
						 LCLA_ALIGNMENT);
	}

	/* Tell the hardware where the LCLA area lives (physical address) */
	writel(virt_to_phys(base->lcla_pool.base),
	       base->virtbase + D40_DREG_LCLA);
failure:
	kfree(page_list);
	return ret;
}
2802508849adSLinus Walleij 
28038d318a50SLinus Walleij static int __init d40_probe(struct platform_device *pdev)
28048d318a50SLinus Walleij {
28058d318a50SLinus Walleij 	int err;
28068d318a50SLinus Walleij 	int ret = -ENOENT;
28078d318a50SLinus Walleij 	struct d40_base *base;
28088d318a50SLinus Walleij 	struct resource *res = NULL;
28098d318a50SLinus Walleij 	int num_reserved_chans;
28108d318a50SLinus Walleij 	u32 val;
28118d318a50SLinus Walleij 
28128d318a50SLinus Walleij 	base = d40_hw_detect_init(pdev);
28138d318a50SLinus Walleij 
28148d318a50SLinus Walleij 	if (!base)
28158d318a50SLinus Walleij 		goto failure;
28168d318a50SLinus Walleij 
28178d318a50SLinus Walleij 	num_reserved_chans = d40_phy_res_init(base);
28188d318a50SLinus Walleij 
28198d318a50SLinus Walleij 	platform_set_drvdata(pdev, base);
28208d318a50SLinus Walleij 
28218d318a50SLinus Walleij 	spin_lock_init(&base->interrupt_lock);
28228d318a50SLinus Walleij 	spin_lock_init(&base->execmd_lock);
28238d318a50SLinus Walleij 
28248d318a50SLinus Walleij 	/* Get IO for logical channel parameter address */
28258d318a50SLinus Walleij 	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "lcpa");
28268d318a50SLinus Walleij 	if (!res) {
28278d318a50SLinus Walleij 		ret = -ENOENT;
28288d318a50SLinus Walleij 		dev_err(&pdev->dev,
28298d318a50SLinus Walleij 			"[%s] No \"lcpa\" memory resource\n",
28308d318a50SLinus Walleij 			__func__);
28318d318a50SLinus Walleij 		goto failure;
28328d318a50SLinus Walleij 	}
28338d318a50SLinus Walleij 	base->lcpa_size = resource_size(res);
28348d318a50SLinus Walleij 	base->phy_lcpa = res->start;
28358d318a50SLinus Walleij 
28368d318a50SLinus Walleij 	if (request_mem_region(res->start, resource_size(res),
28378d318a50SLinus Walleij 			       D40_NAME " I/O lcpa") == NULL) {
28388d318a50SLinus Walleij 		ret = -EBUSY;
28398d318a50SLinus Walleij 		dev_err(&pdev->dev,
28408d318a50SLinus Walleij 			"[%s] Failed to request LCPA region 0x%x-0x%x\n",
28418d318a50SLinus Walleij 			__func__, res->start, res->end);
28428d318a50SLinus Walleij 		goto failure;
28438d318a50SLinus Walleij 	}
28448d318a50SLinus Walleij 
28458d318a50SLinus Walleij 	/* We make use of ESRAM memory for this. */
28468d318a50SLinus Walleij 	val = readl(base->virtbase + D40_DREG_LCPA);
28478d318a50SLinus Walleij 	if (res->start != val && val != 0) {
28488d318a50SLinus Walleij 		dev_warn(&pdev->dev,
28498d318a50SLinus Walleij 			 "[%s] Mismatch LCPA dma 0x%x, def 0x%x\n",
28508d318a50SLinus Walleij 			 __func__, val, res->start);
28518d318a50SLinus Walleij 	} else
28528d318a50SLinus Walleij 		writel(res->start, base->virtbase + D40_DREG_LCPA);
28538d318a50SLinus Walleij 
28548d318a50SLinus Walleij 	base->lcpa_base = ioremap(res->start, resource_size(res));
28558d318a50SLinus Walleij 	if (!base->lcpa_base) {
28568d318a50SLinus Walleij 		ret = -ENOMEM;
28578d318a50SLinus Walleij 		dev_err(&pdev->dev,
28588d318a50SLinus Walleij 			"[%s] Failed to ioremap LCPA region\n",
28598d318a50SLinus Walleij 			__func__);
28608d318a50SLinus Walleij 		goto failure;
28618d318a50SLinus Walleij 	}
2862508849adSLinus Walleij 
2863508849adSLinus Walleij 	ret = d40_lcla_allocate(base);
2864508849adSLinus Walleij 	if (ret) {
2865508849adSLinus Walleij 		dev_err(&pdev->dev, "[%s] Failed to allocate LCLA area\n",
28668d318a50SLinus Walleij 			__func__);
28678d318a50SLinus Walleij 		goto failure;
28688d318a50SLinus Walleij 	}
28698d318a50SLinus Walleij 
28708d318a50SLinus Walleij 	spin_lock_init(&base->lcla_pool.lock);
28718d318a50SLinus Walleij 
28728d318a50SLinus Walleij 	base->irq = platform_get_irq(pdev, 0);
28738d318a50SLinus Walleij 
28748d318a50SLinus Walleij 	ret = request_irq(base->irq, d40_handle_interrupt, 0, D40_NAME, base);
28758d318a50SLinus Walleij 
28768d318a50SLinus Walleij 	if (ret) {
28778d318a50SLinus Walleij 		dev_err(&pdev->dev, "[%s] No IRQ defined\n", __func__);
28788d318a50SLinus Walleij 		goto failure;
28798d318a50SLinus Walleij 	}
28808d318a50SLinus Walleij 
28818d318a50SLinus Walleij 	err = d40_dmaengine_init(base, num_reserved_chans);
28828d318a50SLinus Walleij 	if (err)
28838d318a50SLinus Walleij 		goto failure;
28848d318a50SLinus Walleij 
28858d318a50SLinus Walleij 	d40_hw_init(base);
28868d318a50SLinus Walleij 
28878d318a50SLinus Walleij 	dev_info(base->dev, "initialized\n");
28888d318a50SLinus Walleij 	return 0;
28898d318a50SLinus Walleij 
28908d318a50SLinus Walleij failure:
28918d318a50SLinus Walleij 	if (base) {
2892c675b1b4SJonas Aaberg 		if (base->desc_slab)
2893c675b1b4SJonas Aaberg 			kmem_cache_destroy(base->desc_slab);
28948d318a50SLinus Walleij 		if (base->virtbase)
28958d318a50SLinus Walleij 			iounmap(base->virtbase);
2896508849adSLinus Walleij 		if (!base->lcla_pool.base_unaligned && base->lcla_pool.base)
2897508849adSLinus Walleij 			free_pages((unsigned long)base->lcla_pool.base,
2898508849adSLinus Walleij 				   base->lcla_pool.pages);
2899767a9675SJonas Aaberg 
2900508849adSLinus Walleij 		kfree(base->lcla_pool.base_unaligned);
2901767a9675SJonas Aaberg 
29028d318a50SLinus Walleij 		if (base->phy_lcpa)
29038d318a50SLinus Walleij 			release_mem_region(base->phy_lcpa,
29048d318a50SLinus Walleij 					   base->lcpa_size);
29058d318a50SLinus Walleij 		if (base->phy_start)
29068d318a50SLinus Walleij 			release_mem_region(base->phy_start,
29078d318a50SLinus Walleij 					   base->phy_size);
29088d318a50SLinus Walleij 		if (base->clk) {
29098d318a50SLinus Walleij 			clk_disable(base->clk);
29108d318a50SLinus Walleij 			clk_put(base->clk);
29118d318a50SLinus Walleij 		}
29128d318a50SLinus Walleij 
29138d318a50SLinus Walleij 		kfree(base->lcla_pool.alloc_map);
29148d318a50SLinus Walleij 		kfree(base->lookup_log_chans);
29158d318a50SLinus Walleij 		kfree(base->lookup_phy_chans);
29168d318a50SLinus Walleij 		kfree(base->phy_res);
29178d318a50SLinus Walleij 		kfree(base);
29188d318a50SLinus Walleij 	}
29198d318a50SLinus Walleij 
29208d318a50SLinus Walleij 	dev_err(&pdev->dev, "[%s] probe failed\n", __func__);
29218d318a50SLinus Walleij 	return ret;
29228d318a50SLinus Walleij }
29238d318a50SLinus Walleij 
/*
 * Platform driver glue.  No .probe member is set here on purpose:
 * the driver is registered with platform_driver_probe() below, which
 * takes d40_probe directly so the __init probe code can be discarded
 * after boot.
 */
static struct platform_driver d40_driver = {
	.driver = {
		.owner = THIS_MODULE,
		.name  = D40_NAME,
	},
};
29308d318a50SLinus Walleij 
/*
 * Register the driver at arch_initcall time so DMA channels are
 * available before the client drivers that depend on them initialize.
 * platform_driver_probe() binds d40_probe once, for already-registered
 * matching devices only.
 */
int __init stedma40_init(void)
{
	return platform_driver_probe(&d40_driver, d40_probe);
}
arch_initcall(stedma40_init);
2936