// SPDX-License-Identifier: GPL-2.0
// Copyright (c) 2017-2018, The Linux foundation. All rights reserved.

#include <linux/clk.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/dma/qcom-gpi-dma.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/log2.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/pm_opp.h>
#include <linux/pm_runtime.h>
#include <linux/qcom-geni-se.h>
#include <linux/spi/spi.h>
#include <linux/spinlock.h>

/* SPI SE specific registers and respective register fields */
#define SE_SPI_CPHA		0x224
#define CPHA			BIT(0)

#define SE_SPI_LOOPBACK		0x22c
#define LOOPBACK_ENABLE		0x1
#define NORMAL_MODE		0x0
#define LOOPBACK_MSK		GENMASK(1, 0)

#define SE_SPI_CPOL		0x230
#define CPOL			BIT(2)

#define SE_SPI_DEMUX_OUTPUT_INV	0x24c
#define CS_DEMUX_OUTPUT_INV_MSK	GENMASK(3, 0)

#define SE_SPI_DEMUX_SEL	0x250
#define CS_DEMUX_OUTPUT_SEL	GENMASK(3, 0)

#define SE_SPI_TRANS_CFG	0x25c
#define CS_TOGGLE		BIT(0)

#define SE_SPI_WORD_LEN		0x268
#define WORD_LEN_MSK		GENMASK(9, 0)
#define MIN_WORD_LEN		4

#define SE_SPI_TX_TRANS_LEN	0x26c
#define SE_SPI_RX_TRANS_LEN	0x270
#define TRANS_LEN_MSK		GENMASK(23, 0)

#define SE_SPI_PRE_POST_CMD_DLY	0x274

#define SE_SPI_DELAY_COUNTERS	0x278
#define SPI_INTER_WORDS_DELAY_MSK	GENMASK(9, 0)
#define SPI_CS_CLK_DELAY_MSK		GENMASK(19, 10)
#define SPI_CS_CLK_DELAY_SHFT		10

/* M_CMD OP codes for SPI */
#define SPI_TX_ONLY		1
#define SPI_RX_ONLY		2
#define SPI_TX_RX		7
#define SPI_CS_ASSERT		8
#define SPI_CS_DEASSERT		9
#define SPI_SCK_ONLY		10
/* M_CMD params for SPI */
#define SPI_PRE_CMD_DELAY	BIT(0)
#define TIMESTAMP_BEFORE	BIT(1)
#define FRAGMENTATION		BIT(2)
#define TIMESTAMP_AFTER		BIT(3)
#define POST_CMD_DELAY		BIT(4)

#define GSI_LOOPBACK_EN		BIT(0)
#define GSI_CS_TOGGLE		BIT(3)
#define GSI_CPHA		BIT(4)
#define GSI_CPOL		BIT(5)

struct spi_geni_master {
	struct geni_se se;
	struct device *dev;
	u32 tx_fifo_depth;
	u32 fifo_width_bits;
	u32 tx_wm;
	u32 last_mode;
	unsigned long cur_speed_hz;
	unsigned long cur_sclk_hz;
	unsigned int cur_bits_per_word;
	unsigned int tx_rem_bytes;
	unsigned int rx_rem_bytes;
	const struct spi_transfer *cur_xfer;
	struct completion cs_done;
	struct completion cancel_done;
	struct completion abort_done;
	unsigned int oversampling;
	spinlock_t lock;
	int irq;
	bool cs_flag;
	bool abort_failed;
	struct dma_chan *tx;
	struct dma_chan *rx;
	int cur_xfer_mode;
};

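/*
 * Pick a source clock and divider for the requested SPI speed.  The serial
 * clock has to run at speed_hz * oversampling, so ask the wrapper for the
 * closest matching source frequency and derive the divider from it.  As an
 * illustrative example (numbers are not from any particular board): with
 * oversampling = 1, a 5 MHz request served from a 19.2 MHz source gives
 * clk_div = DIV_ROUND_UP(19200000, 5000000) = 4 and an actual word clock
 * of 19200000 / 4 = 4.8 MHz.
 */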
static int get_spi_clk_cfg(unsigned int speed_hz,
			struct spi_geni_master *mas,
			unsigned int *clk_idx,
			unsigned int *clk_div)
{
	unsigned long sclk_freq;
	unsigned int actual_hz;
	int ret;

	ret = geni_se_clk_freq_match(&mas->se,
				speed_hz * mas->oversampling,
				clk_idx, &sclk_freq, false);
	if (ret) {
		dev_err(mas->dev, "Failed(%d) to find src clk for %dHz\n",
							ret, speed_hz);
		return ret;
	}

	*clk_div = DIV_ROUND_UP(sclk_freq, mas->oversampling * speed_hz);
	actual_hz = sclk_freq / (mas->oversampling * *clk_div);

	dev_dbg(mas->dev, "req %u=>%u sclk %lu, idx %d, div %d\n", speed_hz,
				actual_hz, sclk_freq, *clk_idx, *clk_div);
	ret = dev_pm_opp_set_rate(mas->dev, sclk_freq);
	if (ret)
		dev_err(mas->dev, "dev_pm_opp_set_rate failed %d\n", ret);
	else
		mas->cur_sclk_hz = sclk_freq;

	return ret;
}

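/*
 * Recover from a transfer that timed out: issue a GENI "cancel" command
 * and, if the cancel itself doesn't complete within a second, escalate to
 * an "abort".  If even the abort times out, remember that in abort_failed
 * so new transfers are refused until the stale interrupts drain (see
 * spi_geni_is_abort_still_pending()).
 */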
static void handle_fifo_timeout(struct spi_master *spi,
				struct spi_message *msg)
{
	struct spi_geni_master *mas = spi_master_get_devdata(spi);
	unsigned long time_left;
	struct geni_se *se = &mas->se;

	spin_lock_irq(&mas->lock);
	reinit_completion(&mas->cancel_done);
	writel(0, se->base + SE_GENI_TX_WATERMARK_REG);
	mas->cur_xfer = NULL;
	geni_se_cancel_m_cmd(se);
	spin_unlock_irq(&mas->lock);

	time_left = wait_for_completion_timeout(&mas->cancel_done, HZ);
	if (time_left)
		return;

	spin_lock_irq(&mas->lock);
	reinit_completion(&mas->abort_done);
	geni_se_abort_m_cmd(se);
	spin_unlock_irq(&mas->lock);

	time_left = wait_for_completion_timeout(&mas->abort_done, HZ);
	if (!time_left) {
		dev_err(mas->dev, "Failed to cancel/abort m_cmd\n");

		/*
		 * No need for a lock since SPI core has a lock and we never
		 * access this from an interrupt.
		 */
		mas->abort_failed = true;
	}
}

static bool spi_geni_is_abort_still_pending(struct spi_geni_master *mas)
{
	struct geni_se *se = &mas->se;
	u32 m_irq, m_irq_en;

	if (!mas->abort_failed)
		return false;

	/*
	 * The only known case where a transfer times out and then a cancel
	 * times out then an abort times out is if something is blocking our
	 * interrupt handler from running.  Avoid starting any new transfers
	 * until that sorts itself out.
	 */
	spin_lock_irq(&mas->lock);
	m_irq = readl(se->base + SE_GENI_M_IRQ_STATUS);
	m_irq_en = readl(se->base + SE_GENI_M_IRQ_EN);
	spin_unlock_irq(&mas->lock);

	if (m_irq & m_irq_en) {
		dev_err(mas->dev, "Interrupts pending after abort: %#010x\n",
			m_irq & m_irq_en);
		return true;
	}

	/*
	 * If we're here the problem resolved itself so no need to check more
	 * on future transfers.
	 */
	mas->abort_failed = false;

	return false;
}

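/*
 * Chip select is driven with dedicated SPI_CS_ASSERT/SPI_CS_DEASSERT
 * commands rather than as a plain GPIO, so we must wait for the command
 * to complete (or time out and be cancelled) before returning.
 */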
static void spi_geni_set_cs(struct spi_device *slv, bool set_flag)
{
	struct spi_geni_master *mas = spi_master_get_devdata(slv->master);
	struct spi_master *spi = dev_get_drvdata(mas->dev);
	struct geni_se *se = &mas->se;
	unsigned long time_left;

	if (!(slv->mode & SPI_CS_HIGH))
		set_flag = !set_flag;

	if (set_flag == mas->cs_flag)
		return;

	pm_runtime_get_sync(mas->dev);

	if (spi_geni_is_abort_still_pending(mas)) {
		dev_err(mas->dev, "Can't set chip select\n");
		goto exit;
	}

	spin_lock_irq(&mas->lock);
	if (mas->cur_xfer) {
		dev_err(mas->dev, "Can't set CS when prev xfer running\n");
		spin_unlock_irq(&mas->lock);
		goto exit;
	}

	mas->cs_flag = set_flag;
	reinit_completion(&mas->cs_done);
	if (set_flag)
		geni_se_setup_m_cmd(se, SPI_CS_ASSERT, 0);
	else
		geni_se_setup_m_cmd(se, SPI_CS_DEASSERT, 0);
	spin_unlock_irq(&mas->lock);

	time_left = wait_for_completion_timeout(&mas->cs_done, HZ);
	if (!time_left) {
		dev_warn(mas->dev, "Timeout setting chip select\n");
		handle_fifo_timeout(spi, NULL);
	}

exit:
	pm_runtime_put(mas->dev);
}

static void spi_setup_word_len(struct spi_geni_master *mas, u16 mode,
					unsigned int bits_per_word)
{
	unsigned int pack_words;
	bool msb_first = (mode & SPI_LSB_FIRST) ? false : true;
	struct geni_se *se = &mas->se;
	u32 word_len;

	/*
	 * If bits_per_word isn't a byte aligned value, set the packing to be
	 * 1 SPI word per FIFO word.
	 */
	if (!(mas->fifo_width_bits % bits_per_word))
		pack_words = mas->fifo_width_bits / bits_per_word;
	else
		pack_words = 1;
	geni_se_config_packing(&mas->se, bits_per_word, pack_words, msb_first,
								true, true);
	word_len = (bits_per_word - MIN_WORD_LEN) & WORD_LEN_MSK;
	writel(word_len, se->base + SE_SPI_WORD_LEN);
}

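/*
 * Program the serial clock for the requested SPI speed and scale the
 * CPU-to-GENI interconnect bandwidth vote to match.  This is skipped
 * entirely when the speed hasn't changed since the previous transfer.
 */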
static int geni_spi_set_clock_and_bw(struct spi_geni_master *mas,
					unsigned long clk_hz)
{
	u32 clk_sel, m_clk_cfg, idx, div;
	struct geni_se *se = &mas->se;
	int ret;

	if (clk_hz == mas->cur_speed_hz)
		return 0;

	ret = get_spi_clk_cfg(clk_hz, mas, &idx, &div);
	if (ret) {
		dev_err(mas->dev, "Err setting clk to %lu: %d\n", clk_hz, ret);
		return ret;
	}

	/*
	 * The SPI core clock gets configured with the requested frequency
	 * or the closest achievable frequency.  Cache the requested value in
	 * cur_speed_hz and refer to it for subsequent transfers instead of
	 * calling clk_get_rate() each time.
	 */
	mas->cur_speed_hz = clk_hz;

	clk_sel = idx & CLK_SEL_MSK;
	m_clk_cfg = (div << CLK_DIV_SHFT) | SER_CLK_EN;
	writel(clk_sel, se->base + SE_GENI_CLK_SEL);
	writel(m_clk_cfg, se->base + GENI_SER_M_CLK_CFG);

	/* Set BW quota for CPU as driver supports FIFO mode only. */
	se->icc_paths[CPU_TO_GENI].avg_bw = Bps_to_icc(mas->cur_speed_hz);
	ret = geni_icc_set_bw(se);
	if (ret)
		return ret;

	return 0;
}

static int setup_fifo_params(struct spi_device *spi_slv,
					struct spi_master *spi)
{
	struct spi_geni_master *mas = spi_master_get_devdata(spi);
	struct geni_se *se = &mas->se;
	u32 loopback_cfg = 0, cpol = 0, cpha = 0, demux_output_inv = 0;
	u32 demux_sel;

	if (mas->last_mode != spi_slv->mode) {
		if (spi_slv->mode & SPI_LOOP)
			loopback_cfg = LOOPBACK_ENABLE;

		if (spi_slv->mode & SPI_CPOL)
			cpol = CPOL;

		if (spi_slv->mode & SPI_CPHA)
			cpha = CPHA;

		if (spi_slv->mode & SPI_CS_HIGH)
			demux_output_inv = BIT(spi_slv->chip_select);

		demux_sel = spi_slv->chip_select;
		mas->cur_bits_per_word = spi_slv->bits_per_word;

		spi_setup_word_len(mas, spi_slv->mode, spi_slv->bits_per_word);
		writel(loopback_cfg, se->base + SE_SPI_LOOPBACK);
		writel(demux_sel, se->base + SE_SPI_DEMUX_SEL);
		writel(cpha, se->base + SE_SPI_CPHA);
		writel(cpol, se->base + SE_SPI_CPOL);
		writel(demux_output_inv, se->base + SE_SPI_DEMUX_OUTPUT_INV);

		mas->last_mode = spi_slv->mode;
	}

	return geni_spi_set_clock_and_bw(mas, spi_slv->max_speed_hz);
}

static void
spi_gsi_callback_result(void *cb, const struct dmaengine_result *result)
{
	struct spi_master *spi = cb;

	spi->cur_msg->status = -EIO;
	if (result->result != DMA_TRANS_NOERROR) {
		dev_err(&spi->dev, "DMA txn failed: %d\n", result->result);
		spi_finalize_current_transfer(spi);
		return;
	}

	if (!result->residue) {
		spi->cur_msg->status = 0;
		dev_dbg(&spi->dev, "DMA txn completed\n");
	} else {
		dev_err(&spi->dev, "DMA xfer has pending: %d\n", result->residue);
	}

	spi_finalize_current_transfer(spi);
}

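/*
 * Map one spi_transfer onto the GPI DMA engine.  The SPI parameters are
 * handed to the GSI firmware via the gpi_spi_config attached to the
 * dma_slave_config; the payload goes through normal dmaengine slave-sg
 * descriptors.  Completion is signalled through the TX descriptor's
 * callback (spi_gsi_callback_result()).
 */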
static int setup_gsi_xfer(struct spi_transfer *xfer, struct spi_geni_master *mas,
			  struct spi_device *spi_slv, struct spi_master *spi)
{
	unsigned long flags = DMA_PREP_INTERRUPT | DMA_CTRL_ACK;
	struct dma_slave_config config = {};
	struct gpi_spi_config peripheral = {};
	struct dma_async_tx_descriptor *tx_desc, *rx_desc;
	int ret;

	config.peripheral_config = &peripheral;
	config.peripheral_size = sizeof(peripheral);
	peripheral.set_config = true;

	if (xfer->bits_per_word != mas->cur_bits_per_word ||
	    xfer->speed_hz != mas->cur_speed_hz) {
		mas->cur_bits_per_word = xfer->bits_per_word;
		mas->cur_speed_hz = xfer->speed_hz;
	}

	if (xfer->tx_buf && xfer->rx_buf) {
		peripheral.cmd = SPI_DUPLEX;
	} else if (xfer->tx_buf) {
		peripheral.cmd = SPI_TX;
		peripheral.rx_len = 0;
	} else if (xfer->rx_buf) {
		peripheral.cmd = SPI_RX;
		if (!(mas->cur_bits_per_word % MIN_WORD_LEN)) {
			peripheral.rx_len = ((xfer->len << 3) / mas->cur_bits_per_word);
		} else {
			int bytes_per_word = (mas->cur_bits_per_word / BITS_PER_BYTE) + 1;

			peripheral.rx_len = (xfer->len / bytes_per_word);
		}
	}

	peripheral.loopback_en = !!(spi_slv->mode & SPI_LOOP);
	peripheral.clock_pol_high = !!(spi_slv->mode & SPI_CPOL);
	peripheral.data_pol_high = !!(spi_slv->mode & SPI_CPHA);
	peripheral.cs = spi_slv->chip_select;
	peripheral.pack_en = true;
	peripheral.word_len = xfer->bits_per_word - MIN_WORD_LEN;

	ret = get_spi_clk_cfg(mas->cur_speed_hz, mas,
			      &peripheral.clk_src, &peripheral.clk_div);
	if (ret) {
		dev_err(mas->dev, "Err in get_spi_clk_cfg() :%d\n", ret);
		return ret;
	}

	if (!xfer->cs_change) {
		if (!list_is_last(&xfer->transfer_list, &spi->cur_msg->transfers))
			peripheral.fragmentation = FRAGMENTATION;
	}

	if (peripheral.cmd & SPI_RX) {
		dmaengine_slave_config(mas->rx, &config);
		rx_desc = dmaengine_prep_slave_sg(mas->rx, xfer->rx_sg.sgl, xfer->rx_sg.nents,
						  DMA_DEV_TO_MEM, flags);
		if (!rx_desc) {
			dev_err(mas->dev, "Err setting up rx desc\n");
			return -EIO;
		}
	}

	/*
	 * Always prepare the TX descriptor, even for RX-only transfers where
	 * tx_buf is NULL: the GSI spec requires TX to be set up.
	 */
	dmaengine_slave_config(mas->tx, &config);
	tx_desc = dmaengine_prep_slave_sg(mas->tx, xfer->tx_sg.sgl, xfer->tx_sg.nents,
					  DMA_MEM_TO_DEV, flags);
	if (!tx_desc) {
		dev_err(mas->dev, "Err setting up tx desc\n");
		return -EIO;
	}

	tx_desc->callback_result = spi_gsi_callback_result;
	tx_desc->callback_param = spi;

	if (peripheral.cmd & SPI_RX)
		dmaengine_submit(rx_desc);
	dmaengine_submit(tx_desc);

	if (peripheral.cmd & SPI_RX)
		dma_async_issue_pending(mas->rx);

	dma_async_issue_pending(mas->tx);
	return 1;
}

static bool geni_can_dma(struct spi_controller *ctlr,
			 struct spi_device *slv, struct spi_transfer *xfer)
{
	struct spi_geni_master *mas = spi_master_get_devdata(slv->master);

	/* check if dma is supported */
	return mas->cur_xfer_mode != GENI_SE_FIFO;
}

static int spi_geni_prepare_message(struct spi_master *spi,
					struct spi_message *spi_msg)
{
	struct spi_geni_master *mas = spi_master_get_devdata(spi);
	int ret;

	switch (mas->cur_xfer_mode) {
	case GENI_SE_FIFO:
		if (spi_geni_is_abort_still_pending(mas))
			return -EBUSY;
		ret = setup_fifo_params(spi_msg->spi, spi);
		if (ret)
			dev_err(mas->dev, "Couldn't select mode %d\n", ret);
		return ret;

	case GENI_GPI_DMA:
		/* nothing to do for GPI DMA */
		return 0;
	}

	dev_err(mas->dev, "Mode not supported %d", mas->cur_xfer_mode);
	return -EINVAL;
}

static int spi_geni_grab_gpi_chan(struct spi_geni_master *mas)
{
	int ret;

	mas->tx = dma_request_chan(mas->dev, "tx");
	if (IS_ERR(mas->tx)) {
		ret = dev_err_probe(mas->dev, PTR_ERR(mas->tx),
				    "Failed to get tx DMA ch\n");
		goto err_tx;
	}

	mas->rx = dma_request_chan(mas->dev, "rx");
	if (IS_ERR(mas->rx)) {
		ret = dev_err_probe(mas->dev, PTR_ERR(mas->rx),
				    "Failed to get rx DMA ch\n");
		goto err_rx;
	}

	return 0;

err_rx:
	mas->rx = NULL;
	dma_release_channel(mas->tx);
err_tx:
	mas->tx = NULL;
	return ret;
}

static void spi_geni_release_dma_chan(struct spi_geni_master *mas)
{
	if (mas->rx) {
		dma_release_channel(mas->rx);
		mas->rx = NULL;
	}

	if (mas->tx) {
		dma_release_channel(mas->tx);
		mas->tx = NULL;
	}
}

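/*
 * One-time controller setup: read the FIFO geometry, pick the oversampling
 * factor from the QUP core version and choose the transfer mode.  GPI DMA
 * is used when the FIFO interface is disabled (and the DMA channels can be
 * acquired); otherwise the driver falls back to FIFO mode.
 */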
static int spi_geni_init(struct spi_geni_master *mas)
{
	struct geni_se *se = &mas->se;
	unsigned int proto, major, minor, ver;
	u32 spi_tx_cfg, fifo_disable;
	int ret = -ENXIO;

	pm_runtime_get_sync(mas->dev);

	proto = geni_se_read_proto(se);
	if (proto != GENI_SE_SPI) {
		dev_err(mas->dev, "Invalid proto %d\n", proto);
		goto out_pm;
	}
	mas->tx_fifo_depth = geni_se_get_tx_fifo_depth(se);

	/* Width of Tx and Rx FIFO is same */
	mas->fifo_width_bits = geni_se_get_tx_fifo_width(se);

	/*
	 * Hardware programming guide suggests configuring the RX FIFO RFR
	 * level to fifo_depth - 2.
	 */
	geni_se_init(se, mas->tx_fifo_depth - 3, mas->tx_fifo_depth - 2);
	/* Transmit an entire FIFO worth of data per IRQ */
	mas->tx_wm = 1;
	ver = geni_se_get_qup_hw_version(se);
	major = GENI_SE_VERSION_MAJOR(ver);
	minor = GENI_SE_VERSION_MINOR(ver);

	if (major == 1 && minor == 0)
		mas->oversampling = 2;
	else
		mas->oversampling = 1;

	fifo_disable = readl(se->base + GENI_IF_DISABLE_RO) & FIFO_IF_DISABLE;
	switch (fifo_disable) {
	case 1:
		ret = spi_geni_grab_gpi_chan(mas);
		if (!ret) { /* success case */
			mas->cur_xfer_mode = GENI_GPI_DMA;
			geni_se_select_mode(se, GENI_GPI_DMA);
			dev_dbg(mas->dev, "Using GPI DMA mode for SPI\n");
			break;
		}
		/*
		 * If we failed to get a DMA channel we can still fall back
		 * to FIFO mode, so fall through.
		 */
		dev_warn(mas->dev, "FIFO mode disabled, but couldn't get DMA, fall back to FIFO mode\n");
		fallthrough;

	case 0:
		mas->cur_xfer_mode = GENI_SE_FIFO;
		geni_se_select_mode(se, GENI_SE_FIFO);
		ret = 0;
		break;
	}

	/* We always control CS manually */
	spi_tx_cfg = readl(se->base + SE_SPI_TRANS_CFG);
	spi_tx_cfg &= ~CS_TOGGLE;
	writel(spi_tx_cfg, se->base + SE_SPI_TRANS_CFG);

out_pm:
	pm_runtime_put(mas->dev);
	return ret;
}

static unsigned int geni_byte_per_fifo_word(struct spi_geni_master *mas)
{
	/*
	 * Calculate how many bytes we'll put in each FIFO word.  If the
	 * transfer words don't pack cleanly into a FIFO word we'll just put
	 * one transfer word in each FIFO word.  If they do pack we'll pack 'em.
	 */
	if (mas->fifo_width_bits % mas->cur_bits_per_word)
		return roundup_pow_of_two(DIV_ROUND_UP(mas->cur_bits_per_word,
						       BITS_PER_BYTE));

	return mas->fifo_width_bits / BITS_PER_BYTE;
}

static bool geni_spi_handle_tx(struct spi_geni_master *mas)
{
	struct geni_se *se = &mas->se;
	unsigned int max_bytes;
	const u8 *tx_buf;
	unsigned int bytes_per_fifo_word = geni_byte_per_fifo_word(mas);
	unsigned int i = 0;

	/* Stop the watermark IRQ if nothing to send */
	if (!mas->cur_xfer) {
		writel(0, se->base + SE_GENI_TX_WATERMARK_REG);
		return false;
	}

	max_bytes = (mas->tx_fifo_depth - mas->tx_wm) * bytes_per_fifo_word;
	if (mas->tx_rem_bytes < max_bytes)
		max_bytes = mas->tx_rem_bytes;

	tx_buf = mas->cur_xfer->tx_buf + mas->cur_xfer->len - mas->tx_rem_bytes;
	while (i < max_bytes) {
		unsigned int j;
		unsigned int bytes_to_write;
		u32 fifo_word = 0;
		u8 *fifo_byte = (u8 *)&fifo_word;

		bytes_to_write = min(bytes_per_fifo_word, max_bytes - i);
		for (j = 0; j < bytes_to_write; j++)
			fifo_byte[j] = tx_buf[i++];
		iowrite32_rep(se->base + SE_GENI_TX_FIFOn, &fifo_word, 1);
	}
	mas->tx_rem_bytes -= max_bytes;
	if (!mas->tx_rem_bytes) {
		writel(0, se->base + SE_GENI_TX_WATERMARK_REG);
		return false;
	}
	return true;
}

static void geni_spi_handle_rx(struct spi_geni_master *mas)
{
	struct geni_se *se = &mas->se;
	u32 rx_fifo_status;
	unsigned int rx_bytes;
	unsigned int rx_last_byte_valid;
	u8 *rx_buf;
	unsigned int bytes_per_fifo_word = geni_byte_per_fifo_word(mas);
	unsigned int i = 0;

	rx_fifo_status = readl(se->base + SE_GENI_RX_FIFO_STATUS);
	rx_bytes = (rx_fifo_status & RX_FIFO_WC_MSK) * bytes_per_fifo_word;
	if (rx_fifo_status & RX_LAST) {
		rx_last_byte_valid = rx_fifo_status & RX_LAST_BYTE_VALID_MSK;
		rx_last_byte_valid >>= RX_LAST_BYTE_VALID_SHFT;
		if (rx_last_byte_valid && rx_last_byte_valid < 4)
			rx_bytes -= bytes_per_fifo_word - rx_last_byte_valid;
	}

	/* Clear out the FIFO and bail if nowhere to put it */
	if (!mas->cur_xfer) {
		for (i = 0; i < DIV_ROUND_UP(rx_bytes, bytes_per_fifo_word); i++)
			readl(se->base + SE_GENI_RX_FIFOn);
		return;
	}

	if (mas->rx_rem_bytes < rx_bytes)
		rx_bytes = mas->rx_rem_bytes;

	rx_buf = mas->cur_xfer->rx_buf + mas->cur_xfer->len - mas->rx_rem_bytes;
	while (i < rx_bytes) {
		u32 fifo_word = 0;
		u8 *fifo_byte = (u8 *)&fifo_word;
		unsigned int bytes_to_read;
		unsigned int j;

		bytes_to_read = min(bytes_per_fifo_word, rx_bytes - i);
		ioread32_rep(se->base + SE_GENI_RX_FIFOn, &fifo_word, 1);
		for (j = 0; j < bytes_to_read; j++)
			rx_buf[i++] = fifo_byte[j];
	}
	mas->rx_rem_bytes -= rx_bytes;
}

static void setup_fifo_xfer(struct spi_transfer *xfer,
				struct spi_geni_master *mas,
				u16 mode, struct spi_master *spi)
{
	u32 m_cmd = 0;
	u32 len;
	struct geni_se *se = &mas->se;
	int ret;

	/*
	 * Ensure that our interrupt handler isn't still running from some
	 * prior command before we start messing with the hardware behind
	 * its back.  We don't need to _keep_ the lock here since we're only
	 * worried about racing with our interrupt handler.  The SPI core
	 * already handles making sure that we're not trying to do two
	 * transfers at once or setting a chip select and doing a transfer
	 * concurrently.
	 *
	 * NOTE: we actually _can't_ hold the lock here because possibly we
	 * might call clk_set_rate() which needs to be able to sleep.
	 */
	spin_lock_irq(&mas->lock);
	spin_unlock_irq(&mas->lock);

	if (xfer->bits_per_word != mas->cur_bits_per_word) {
		spi_setup_word_len(mas, mode, xfer->bits_per_word);
		mas->cur_bits_per_word = xfer->bits_per_word;
	}

	/* Speed and bits per word can be overridden per transfer */
	ret = geni_spi_set_clock_and_bw(mas, xfer->speed_hz);
	if (ret)
		return;

	mas->tx_rem_bytes = 0;
	mas->rx_rem_bytes = 0;

	if (!(mas->cur_bits_per_word % MIN_WORD_LEN))
		len = xfer->len * BITS_PER_BYTE / mas->cur_bits_per_word;
	else
		len = xfer->len / (mas->cur_bits_per_word / BITS_PER_BYTE + 1);
	len &= TRANS_LEN_MSK;

	mas->cur_xfer = xfer;
	if (xfer->tx_buf) {
		m_cmd |= SPI_TX_ONLY;
		mas->tx_rem_bytes = xfer->len;
		writel(len, se->base + SE_SPI_TX_TRANS_LEN);
	}

	if (xfer->rx_buf) {
		m_cmd |= SPI_RX_ONLY;
		writel(len, se->base + SE_SPI_RX_TRANS_LEN);
		mas->rx_rem_bytes = xfer->len;
	}

	/*
	 * Lock around right before we start the transfer since our
	 * interrupt could come in at any time now.
	 */
	spin_lock_irq(&mas->lock);
	geni_se_setup_m_cmd(se, m_cmd, FRAGMENTATION);
	if (m_cmd & SPI_TX_ONLY) {
		if (geni_spi_handle_tx(mas))
			writel(mas->tx_wm, se->base + SE_GENI_TX_WATERMARK_REG);
	}
	spin_unlock_irq(&mas->lock);
}

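/*
 * transfer_one() only starts the transfer.  Returning 1 tells the SPI core
 * the transfer is still in flight; it is finalized later from the IRQ
 * handler (FIFO mode) or the DMA completion callback (GPI mode).
 */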
static int spi_geni_transfer_one(struct spi_master *spi,
				struct spi_device *slv,
				struct spi_transfer *xfer)
{
	struct spi_geni_master *mas = spi_master_get_devdata(spi);

	if (spi_geni_is_abort_still_pending(mas))
		return -EBUSY;

	/* Terminate and return success for 0 byte length transfer */
	if (!xfer->len)
		return 0;

	if (mas->cur_xfer_mode == GENI_SE_FIFO) {
		setup_fifo_xfer(xfer, mas, slv->mode, spi);
		return 1;
	}
	return setup_gsi_xfer(xfer, mas, slv, spi);
}

static irqreturn_t geni_spi_isr(int irq, void *data)
{
	struct spi_master *spi = data;
	struct spi_geni_master *mas = spi_master_get_devdata(spi);
	struct geni_se *se = &mas->se;
	u32 m_irq;

	m_irq = readl(se->base + SE_GENI_M_IRQ_STATUS);
	if (!m_irq)
		return IRQ_NONE;

	if (m_irq & (M_CMD_OVERRUN_EN | M_ILLEGAL_CMD_EN | M_CMD_FAILURE_EN |
		     M_RX_FIFO_RD_ERR_EN | M_RX_FIFO_WR_ERR_EN |
		     M_TX_FIFO_RD_ERR_EN | M_TX_FIFO_WR_ERR_EN))
		dev_warn(mas->dev, "Unexpected IRQ err status %#010x\n", m_irq);

	spin_lock(&mas->lock);

	if ((m_irq & M_RX_FIFO_WATERMARK_EN) || (m_irq & M_RX_FIFO_LAST_EN))
		geni_spi_handle_rx(mas);

	if (m_irq & M_TX_FIFO_WATERMARK_EN)
		geni_spi_handle_tx(mas);

	if (m_irq & M_CMD_DONE_EN) {
		if (mas->cur_xfer) {
			spi_finalize_current_transfer(spi);
			mas->cur_xfer = NULL;
			/*
			 * If this happens, then a CMD_DONE came before all the
			 * Tx buffer bytes were sent out. This is unusual, log
			 * this condition and disable the WM interrupt to
			 * prevent the system from stalling due to an interrupt
			 * storm.
			 *
			 * If this happens when all Rx bytes haven't been
			 * received, log the condition. The only known time
			 * this can happen is if bits_per_word != 8 and some
			 * registers that expect xfer lengths in num spi_words
			 * weren't written correctly.
			 */
			if (mas->tx_rem_bytes) {
				writel(0, se->base + SE_GENI_TX_WATERMARK_REG);
				dev_err(mas->dev, "Premature done. tx_rem = %d bpw%d\n",
					mas->tx_rem_bytes, mas->cur_bits_per_word);
			}
			if (mas->rx_rem_bytes)
				dev_err(mas->dev, "Premature done. rx_rem = %d bpw%d\n",
					mas->rx_rem_bytes, mas->cur_bits_per_word);
		} else {
			complete(&mas->cs_done);
		}
	}

	if (m_irq & M_CMD_CANCEL_EN)
		complete(&mas->cancel_done);
	if (m_irq & M_CMD_ABORT_EN)
		complete(&mas->abort_done);

	/*
	 * It's both safe and a good idea to Ack all of our interrupts at the
	 * end of the function. Specifically:
	 * - M_CMD_DONE_EN / M_RX_FIFO_LAST_EN: Edge triggered interrupts and
	 *   clearing Acks. Clearing at the end relies on nobody else having
	 *   started a new transfer yet or else we could be clearing _their_
	 *   done bit, but everyone grabs the spinlock before starting a new
	 *   transfer.
	 * - M_RX_FIFO_WATERMARK_EN / M_TX_FIFO_WATERMARK_EN: These appear
	 *   to be "latched level" interrupts so it's important to clear them
	 *   _after_ you've handled the condition and always safe to do so
	 *   since they'll re-assert if they're still happening.
	 */
	writel(m_irq, se->base + SE_GENI_M_IRQ_CLEAR);

	spin_unlock(&mas->lock);

	return IRQ_HANDLED;
}

static int spi_geni_probe(struct platform_device *pdev)
{
	int ret, irq;
	struct spi_master *spi;
	struct spi_geni_master *mas;
	void __iomem *base;
	struct clk *clk;
	struct device *dev = &pdev->dev;

	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return irq;

	ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64));
	if (ret) {
		ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
		if (ret)
			return dev_err_probe(dev, ret, "could not set DMA mask\n");
	}

	base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(base))
		return PTR_ERR(base);

	clk = devm_clk_get(dev, "se");
	if (IS_ERR(clk))
		return PTR_ERR(clk);

	spi = devm_spi_alloc_master(dev, sizeof(*mas));
	if (!spi)
		return -ENOMEM;

	platform_set_drvdata(pdev, spi);
	mas = spi_master_get_devdata(spi);
	mas->irq = irq;
	mas->dev = dev;
	mas->se.dev = dev;
	mas->se.wrapper = dev_get_drvdata(dev->parent);
	mas->se.base = base;
	mas->se.clk = clk;

	ret = devm_pm_opp_set_clkname(&pdev->dev, "se");
	if (ret)
		return ret;
	/* OPP table is optional */
	ret = devm_pm_opp_of_add_table(&pdev->dev);
	if (ret && ret != -ENODEV) {
		dev_err(&pdev->dev, "invalid OPP table in device tree\n");
		return ret;
	}

	spi->bus_num = -1;
	spi->dev.of_node = dev->of_node;
	spi->mode_bits = SPI_CPOL | SPI_CPHA | SPI_LOOP | SPI_CS_HIGH;
	spi->bits_per_word_mask = SPI_BPW_RANGE_MASK(4, 32);
	spi->num_chipselect = 4;
	spi->max_speed_hz = 50000000;
	spi->prepare_message = spi_geni_prepare_message;
	spi->transfer_one = spi_geni_transfer_one;
	spi->can_dma = geni_can_dma;
	spi->dma_map_dev = dev->parent;
	spi->auto_runtime_pm = true;
	spi->handle_err = handle_fifo_timeout;
	spi->use_gpio_descriptors = true;

	init_completion(&mas->cs_done);
	init_completion(&mas->cancel_done);
	init_completion(&mas->abort_done);
	spin_lock_init(&mas->lock);
	pm_runtime_use_autosuspend(&pdev->dev);
	pm_runtime_set_autosuspend_delay(&pdev->dev, 250);
	pm_runtime_enable(dev);

	ret = geni_icc_get(&mas->se, NULL);
	if (ret)
		goto spi_geni_probe_runtime_disable;
	/* Set the bus quota to a reasonable value for register access */
	mas->se.icc_paths[GENI_TO_CORE].avg_bw = Bps_to_icc(CORE_2X_50_MHZ);
	mas->se.icc_paths[CPU_TO_GENI].avg_bw = GENI_DEFAULT_BW;

	ret = geni_icc_set_bw(&mas->se);
	if (ret)
		goto spi_geni_probe_runtime_disable;

	ret = spi_geni_init(mas);
	if (ret)
		goto spi_geni_probe_runtime_disable;

	/*
	 * Only install set_cs for FIFO mode.  In DMA (GSI) mode the GSI
	 * engine controls the chip select based on the parameters passed in
	 * the TRE.
	 */
	if (mas->cur_xfer_mode == GENI_SE_FIFO)
		spi->set_cs = spi_geni_set_cs;

	ret = request_irq(mas->irq, geni_spi_isr, 0, dev_name(dev), spi);
	if (ret)
		goto spi_geni_release_dma;

	ret = spi_register_master(spi);
	if (ret)
		goto spi_geni_probe_free_irq;

	return 0;
spi_geni_probe_free_irq:
	free_irq(mas->irq, spi);
spi_geni_release_dma:
	spi_geni_release_dma_chan(mas);
spi_geni_probe_runtime_disable:
	pm_runtime_disable(dev);
	return ret;
}

static int spi_geni_remove(struct platform_device *pdev)
{
	struct spi_master *spi = platform_get_drvdata(pdev);
	struct spi_geni_master *mas = spi_master_get_devdata(spi);

	/* Unregister _before_ disabling pm_runtime() so we stop transfers */
	spi_unregister_master(spi);

	spi_geni_release_dma_chan(mas);

	free_irq(mas->irq, spi);
	pm_runtime_disable(&pdev->dev);
	return 0;
}

static int __maybe_unused spi_geni_runtime_suspend(struct device *dev)
{
	struct spi_master *spi = dev_get_drvdata(dev);
	struct spi_geni_master *mas = spi_master_get_devdata(spi);
	int ret;

	/* Drop the performance state vote */
	dev_pm_opp_set_rate(dev, 0);

	ret = geni_se_resources_off(&mas->se);
	if (ret)
		return ret;

	return geni_icc_disable(&mas->se);
}

static int __maybe_unused spi_geni_runtime_resume(struct device *dev)
{
	struct spi_master *spi = dev_get_drvdata(dev);
	struct spi_geni_master *mas = spi_master_get_devdata(spi);
	int ret;

	ret = geni_icc_enable(&mas->se);
	if (ret)
		return ret;

	ret = geni_se_resources_on(&mas->se);
	if (ret)
		return ret;

	return dev_pm_opp_set_rate(mas->dev, mas->cur_sclk_hz);
}

static int __maybe_unused spi_geni_suspend(struct device *dev)
{
	struct spi_master *spi = dev_get_drvdata(dev);
	int ret;

	ret = spi_master_suspend(spi);
	if (ret)
		return ret;

	ret = pm_runtime_force_suspend(dev);
	if (ret)
		spi_master_resume(spi);

	return ret;
}

static int __maybe_unused spi_geni_resume(struct device *dev)
{
	struct spi_master *spi = dev_get_drvdata(dev);
	int ret;

	ret = pm_runtime_force_resume(dev);
	if (ret)
		return ret;

	ret = spi_master_resume(spi);
	if (ret)
		pm_runtime_force_suspend(dev);

	return ret;
}

static const struct dev_pm_ops spi_geni_pm_ops = {
	SET_RUNTIME_PM_OPS(spi_geni_runtime_suspend,
					spi_geni_runtime_resume, NULL)
	SET_SYSTEM_SLEEP_PM_OPS(spi_geni_suspend, spi_geni_resume)
};

static const struct of_device_id spi_geni_dt_match[] = {
	{ .compatible = "qcom,geni-spi" },
	{}
};
MODULE_DEVICE_TABLE(of, spi_geni_dt_match);

static struct platform_driver spi_geni_driver = {
	.probe  = spi_geni_probe,
	.remove = spi_geni_remove,
	.driver = {
		.name = "geni_spi",
		.pm = &spi_geni_pm_ops,
		.of_match_table = spi_geni_dt_match,
	},
};
module_platform_driver(spi_geni_driver);

MODULE_DESCRIPTION("SPI driver for GENI based QUP cores");
MODULE_LICENSE("GPL v2");