// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2015 MediaTek Inc.
 * Author: Leilk Liu <leilk.liu@mediatek.com>
 */

#include <linux/clk.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/ioport.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/gpio/consumer.h>
#include <linux/pinctrl/consumer.h>
#include <linux/platform_device.h>
#include <linux/platform_data/spi-mt65xx.h>
#include <linux/pm_runtime.h>
#include <linux/spi/spi.h>
#include <linux/spi/spi-mem.h>
#include <linux/dma-mapping.h>
#include <linux/pm_qos.h>

#define SPI_CFG0_REG			0x0000
#define SPI_CFG1_REG			0x0004
#define SPI_TX_SRC_REG			0x0008
#define SPI_RX_DST_REG			0x000c
#define SPI_TX_DATA_REG			0x0010
#define SPI_RX_DATA_REG			0x0014
#define SPI_CMD_REG			0x0018
#define SPI_STATUS0_REG			0x001c
#define SPI_PAD_SEL_REG			0x0024
#define SPI_CFG2_REG			0x0028
#define SPI_TX_SRC_REG_64		0x002c
#define SPI_RX_DST_REG_64		0x0030
#define SPI_CFG3_IPM_REG		0x0040

#define SPI_CFG0_SCK_HIGH_OFFSET	0
#define SPI_CFG0_SCK_LOW_OFFSET		8
#define SPI_CFG0_CS_HOLD_OFFSET		16
#define SPI_CFG0_CS_SETUP_OFFSET	24
#define SPI_ADJUST_CFG0_CS_HOLD_OFFSET	0
#define SPI_ADJUST_CFG0_CS_SETUP_OFFSET	16

#define SPI_CFG1_CS_IDLE_OFFSET		0
#define SPI_CFG1_PACKET_LOOP_OFFSET	8
#define SPI_CFG1_PACKET_LENGTH_OFFSET	16
#define SPI_CFG1_GET_TICK_DLY_OFFSET	29
#define SPI_CFG1_GET_TICK_DLY_OFFSET_V1	30

#define SPI_CFG1_GET_TICK_DLY_MASK	0xe0000000
#define SPI_CFG1_GET_TICK_DLY_MASK_V1	0xc0000000

#define SPI_CFG1_CS_IDLE_MASK		0xff
#define SPI_CFG1_PACKET_LOOP_MASK	0xff00
#define SPI_CFG1_PACKET_LENGTH_MASK	0x3ff0000
#define SPI_CFG1_IPM_PACKET_LENGTH_MASK	GENMASK(31, 16)
#define SPI_CFG2_SCK_HIGH_OFFSET	0
#define SPI_CFG2_SCK_LOW_OFFSET		16

#define SPI_CMD_ACT			BIT(0)
#define SPI_CMD_RESUME			BIT(1)
#define SPI_CMD_RST			BIT(2)
#define SPI_CMD_PAUSE_EN		BIT(4)
#define SPI_CMD_DEASSERT		BIT(5)
#define SPI_CMD_SAMPLE_SEL		BIT(6)
#define SPI_CMD_CS_POL			BIT(7)
#define SPI_CMD_CPHA			BIT(8)
#define SPI_CMD_CPOL			BIT(9)
#define SPI_CMD_RX_DMA			BIT(10)
#define SPI_CMD_TX_DMA			BIT(11)
#define SPI_CMD_TXMSBF			BIT(12)
#define SPI_CMD_RXMSBF			BIT(13)
#define SPI_CMD_RX_ENDIAN		BIT(14)
#define SPI_CMD_TX_ENDIAN		BIT(15)
#define SPI_CMD_FINISH_IE		BIT(16)
#define SPI_CMD_PAUSE_IE		BIT(17)
#define SPI_CMD_IPM_NONIDLE_MODE	BIT(19)
#define SPI_CMD_IPM_SPIM_LOOP		BIT(21)
#define SPI_CMD_IPM_GET_TICKDLY_OFFSET	22

#define SPI_CMD_IPM_GET_TICKDLY_MASK	GENMASK(24, 22)

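/*
 * PIN_MODE_CFG maps an SPI-MEM bus width to the 2-bit pin-mode field in
 * SPI_CFG3_IPM_REG: 1 (single) -> 0, 2 (dual) -> 1, 4 (quad) -> 2.
 */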
#define PIN_MODE_CFG(x)	((x) / 2)

#define SPI_CFG3_IPM_HALF_DUPLEX_DIR	BIT(2)
#define SPI_CFG3_IPM_HALF_DUPLEX_EN	BIT(3)
#define SPI_CFG3_IPM_XMODE_EN		BIT(4)
#define SPI_CFG3_IPM_NODATA_FLAG	BIT(5)
#define SPI_CFG3_IPM_CMD_BYTELEN_OFFSET	8
#define SPI_CFG3_IPM_ADDR_BYTELEN_OFFSET 12

#define SPI_CFG3_IPM_CMD_PIN_MODE_MASK	GENMASK(1, 0)
#define SPI_CFG3_IPM_CMD_BYTELEN_MASK	GENMASK(11, 8)
#define SPI_CFG3_IPM_ADDR_BYTELEN_MASK	GENMASK(15, 12)

#define MT8173_SPI_MAX_PAD_SEL		3

#define MTK_SPI_PAUSE_INT_STATUS	0x2

#define MTK_SPI_MAX_FIFO_SIZE		32U
#define MTK_SPI_PACKET_SIZE		1024
#define MTK_SPI_IPM_PACKET_SIZE		SZ_64K
#define MTK_SPI_IPM_PACKET_LOOP		SZ_256

#define MTK_SPI_IDLE			0
#define MTK_SPI_PAUSED			1

#define MTK_SPI_32BITS_MASK		(0xffffffff)

#define DMA_ADDR_EXT_BITS		(36)
#define DMA_ADDR_DEF_BITS		(32)

/**
 * struct mtk_spi_compatible - device data structure
 * @need_pad_sel:	Enable pad (pins) selection in SPI controller
 * @must_tx:		Must explicitly send dummy TX bytes to do an RX-only transfer
 * @enhance_timing:	Enable adjusting cfg register to enhance time accuracy
 * @dma_ext:		DMA address extension supported
 * @no_need_unprepare:	Don't unprepare the SPI clk during runtime
 * @ipm_design:		Adjust/extend registers to support IPM design IP features
 */
struct mtk_spi_compatible {
	bool need_pad_sel;
	bool must_tx;
	bool enhance_timing;
	bool dma_ext;
	bool no_need_unprepare;
	bool ipm_design;
};

/**
 * struct mtk_spi - SPI driver instance
 * @base:		Start address of the SPI controller registers
 * @state:		SPI controller state
 * @pad_num:		Number of pad_sel entries
 * @pad_sel:		Groups of pins to select
 * @parent_clk:		Parent of sel_clk
 * @sel_clk:		SPI host mux clock
 * @spi_clk:		Peripheral clock
 * @spi_hclk:		AHB bus clock
 * @cur_transfer:	Currently processed SPI transfer
 * @xfer_len:		Number of bytes to transfer
 * @num_xfered:		Number of transferred bytes
 * @tx_sgl:		TX transfer scatterlist
 * @rx_sgl:		RX transfer scatterlist
 * @tx_sgl_len:		Size of TX DMA transfer
 * @rx_sgl_len:		Size of RX DMA transfer
 * @dev_comp:		Device data structure
 * @qos_request:	QoS request
 * @spi_clk_hz:		Current SPI clock in Hz
 * @spimem_done:	SPI-MEM operation completion
 * @use_spimem:		Set while an SPI-MEM operation is in flight
 * @dev:		Device pointer
 * @tx_dma:		DMA start for SPI-MEM TX
 * @rx_dma:		DMA start for SPI-MEM RX
 */
struct mtk_spi {
	void __iomem *base;
	u32 state;
	int pad_num;
	u32 *pad_sel;
	struct clk *parent_clk, *sel_clk, *spi_clk, *spi_hclk;
	struct spi_transfer *cur_transfer;
	u32 xfer_len;
	u32 num_xfered;
	struct scatterlist *tx_sgl, *rx_sgl;
	u32 tx_sgl_len, rx_sgl_len;
	const struct mtk_spi_compatible *dev_comp;
	struct pm_qos_request qos_request;
	u32 spi_clk_hz;
	struct completion spimem_done;
	bool use_spimem;
	struct device *dev;
	dma_addr_t tx_dma;
	dma_addr_t rx_dma;
};

static const struct mtk_spi_compatible mtk_common_compat;

static const struct mtk_spi_compatible mt2712_compat = {
	.must_tx = true,
};

static const struct mtk_spi_compatible mtk_ipm_compat = {
	.enhance_timing = true,
	.dma_ext = true,
	.ipm_design = true,
};

static const struct mtk_spi_compatible mt6765_compat = {
	.need_pad_sel = true,
	.must_tx = true,
	.enhance_timing = true,
	.dma_ext = true,
};

static const struct mtk_spi_compatible mt7622_compat = {
	.must_tx = true,
	.enhance_timing = true,
};

static const struct mtk_spi_compatible mt8173_compat = {
	.need_pad_sel = true,
	.must_tx = true,
};

static const struct mtk_spi_compatible mt8183_compat = {
	.need_pad_sel = true,
	.must_tx = true,
	.enhance_timing = true,
};

static const struct mtk_spi_compatible mt6893_compat = {
	.need_pad_sel = true,
	.must_tx = true,
	.enhance_timing = true,
	.dma_ext = true,
	.no_need_unprepare = true,
};

/*
 * Default chip configuration, used unless the platform
 * supplies its own.
 */
static const struct mtk_chip_config mtk_default_chip_info = {
	.sample_sel = 0,
	.tick_delay = 0,
};
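
/*
 * Hypothetical usage sketch (not taken from this driver): a client may
 * override these defaults by pointing spi_device->controller_data at
 * its own mtk_chip_config (declared in
 * <linux/platform_data/spi-mt65xx.h>) before mtk_spi_setup() runs:
 *
 *	static struct mtk_chip_config my_chip_info = {
 *		.sample_sel = 1,
 *		.tick_delay = 2,
 *	};
 *	spi->controller_data = &my_chip_info;
 */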

static const struct of_device_id mtk_spi_of_match[] = {
	{ .compatible = "mediatek,spi-ipm",
		.data = (void *)&mtk_ipm_compat,
	},
	{ .compatible = "mediatek,mt2701-spi",
		.data = (void *)&mtk_common_compat,
	},
	{ .compatible = "mediatek,mt2712-spi",
		.data = (void *)&mt2712_compat,
	},
	{ .compatible = "mediatek,mt6589-spi",
		.data = (void *)&mtk_common_compat,
	},
	{ .compatible = "mediatek,mt6765-spi",
		.data = (void *)&mt6765_compat,
	},
	{ .compatible = "mediatek,mt7622-spi",
		.data = (void *)&mt7622_compat,
	},
	{ .compatible = "mediatek,mt7629-spi",
		.data = (void *)&mt7622_compat,
	},
	{ .compatible = "mediatek,mt8135-spi",
		.data = (void *)&mtk_common_compat,
	},
	{ .compatible = "mediatek,mt8173-spi",
		.data = (void *)&mt8173_compat,
	},
	{ .compatible = "mediatek,mt8183-spi",
		.data = (void *)&mt8183_compat,
	},
	{ .compatible = "mediatek,mt8192-spi",
		.data = (void *)&mt6765_compat,
	},
	{ .compatible = "mediatek,mt6893-spi",
		.data = (void *)&mt6893_compat,
	},
	{}
};
MODULE_DEVICE_TABLE(of, mtk_spi_of_match);

static void mtk_spi_reset(struct mtk_spi *mdata)
{
	u32 reg_val;

	/* set the software reset bit in SPI_CMD_REG. */
	reg_val = readl(mdata->base + SPI_CMD_REG);
	reg_val |= SPI_CMD_RST;
	writel(reg_val, mdata->base + SPI_CMD_REG);

	reg_val = readl(mdata->base + SPI_CMD_REG);
	reg_val &= ~SPI_CMD_RST;
	writel(reg_val, mdata->base + SPI_CMD_REG);
}

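/*
 * Convert the CS delays from nanoseconds to SPI clock cycles as
 * cycles = delay_ns * ceil(spi_clk_hz / 1 MHz) / 1000. For example,
 * with a 100 MHz spi_clk a 50 ns cs_setup becomes 50 * 100 / 1000 = 5
 * cycles, programmed minus one into SPI_CFG0_REG.
 */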
static int mtk_spi_set_hw_cs_timing(struct spi_device *spi)
{
	struct mtk_spi *mdata = spi_controller_get_devdata(spi->controller);
	struct spi_delay *cs_setup = &spi->cs_setup;
	struct spi_delay *cs_hold = &spi->cs_hold;
	struct spi_delay *cs_inactive = &spi->cs_inactive;
	u32 setup, hold, inactive;
	u32 reg_val;
	int delay;

	delay = spi_delay_to_ns(cs_setup, NULL);
	if (delay < 0)
		return delay;
	setup = (delay * DIV_ROUND_UP(mdata->spi_clk_hz, 1000000)) / 1000;

	delay = spi_delay_to_ns(cs_hold, NULL);
	if (delay < 0)
		return delay;
	hold = (delay * DIV_ROUND_UP(mdata->spi_clk_hz, 1000000)) / 1000;

	delay = spi_delay_to_ns(cs_inactive, NULL);
	if (delay < 0)
		return delay;
	inactive = (delay * DIV_ROUND_UP(mdata->spi_clk_hz, 1000000)) / 1000;

	if (hold || setup) {
		reg_val = readl(mdata->base + SPI_CFG0_REG);
		if (mdata->dev_comp->enhance_timing) {
			if (hold) {
				hold = min_t(u32, hold, 0x10000);
				reg_val &= ~(0xffff << SPI_ADJUST_CFG0_CS_HOLD_OFFSET);
				reg_val |= (((hold - 1) & 0xffff)
					<< SPI_ADJUST_CFG0_CS_HOLD_OFFSET);
			}
			if (setup) {
				setup = min_t(u32, setup, 0x10000);
				reg_val &= ~(0xffff << SPI_ADJUST_CFG0_CS_SETUP_OFFSET);
				reg_val |= (((setup - 1) & 0xffff)
					<< SPI_ADJUST_CFG0_CS_SETUP_OFFSET);
			}
		} else {
			if (hold) {
				hold = min_t(u32, hold, 0x100);
				reg_val &= ~(0xff << SPI_CFG0_CS_HOLD_OFFSET);
				reg_val |= (((hold - 1) & 0xff) << SPI_CFG0_CS_HOLD_OFFSET);
			}
			if (setup) {
				setup = min_t(u32, setup, 0x100);
				reg_val &= ~(0xff << SPI_CFG0_CS_SETUP_OFFSET);
				reg_val |= (((setup - 1) & 0xff)
					<< SPI_CFG0_CS_SETUP_OFFSET);
			}
		}
		writel(reg_val, mdata->base + SPI_CFG0_REG);
	}

	if (inactive) {
		inactive = min_t(u32, inactive, 0x100);
		reg_val = readl(mdata->base + SPI_CFG1_REG);
		reg_val &= ~SPI_CFG1_CS_IDLE_MASK;
		reg_val |= (((inactive - 1) & 0xff) << SPI_CFG1_CS_IDLE_OFFSET);
		writel(reg_val, mdata->base + SPI_CFG1_REG);
	}

	return 0;
}

static int mtk_spi_hw_init(struct spi_controller *host,
			   struct spi_device *spi)
{
	u16 cpha, cpol;
	u32 reg_val;
	struct mtk_chip_config *chip_config = spi->controller_data;
	struct mtk_spi *mdata = spi_controller_get_devdata(host);

	cpu_latency_qos_update_request(&mdata->qos_request, 500);
	cpha = spi->mode & SPI_CPHA ? 1 : 0;
	cpol = spi->mode & SPI_CPOL ? 1 : 0;

	reg_val = readl(mdata->base + SPI_CMD_REG);
	if (mdata->dev_comp->ipm_design) {
		/* run the transfer with no idle time until the packet length is reached */
		reg_val |= SPI_CMD_IPM_NONIDLE_MODE;
		if (spi->mode & SPI_LOOP)
			reg_val |= SPI_CMD_IPM_SPIM_LOOP;
		else
			reg_val &= ~SPI_CMD_IPM_SPIM_LOOP;
	}

	if (cpha)
		reg_val |= SPI_CMD_CPHA;
	else
		reg_val &= ~SPI_CMD_CPHA;
	if (cpol)
		reg_val |= SPI_CMD_CPOL;
	else
		reg_val &= ~SPI_CMD_CPOL;

	/* set the TX/RX bit order (MSB-first unless SPI_LSB_FIRST) */
	if (spi->mode & SPI_LSB_FIRST) {
		reg_val &= ~SPI_CMD_TXMSBF;
		reg_val &= ~SPI_CMD_RXMSBF;
	} else {
		reg_val |= SPI_CMD_TXMSBF;
		reg_val |= SPI_CMD_RXMSBF;
	}

	/* set the tx/rx endian */
#ifdef __LITTLE_ENDIAN
	reg_val &= ~SPI_CMD_TX_ENDIAN;
	reg_val &= ~SPI_CMD_RX_ENDIAN;
#else
	reg_val |= SPI_CMD_TX_ENDIAN;
	reg_val |= SPI_CMD_RX_ENDIAN;
#endif

	if (mdata->dev_comp->enhance_timing) {
		/* set CS polarity */
		if (spi->mode & SPI_CS_HIGH)
			reg_val |= SPI_CMD_CS_POL;
		else
			reg_val &= ~SPI_CMD_CS_POL;

		if (chip_config->sample_sel)
			reg_val |= SPI_CMD_SAMPLE_SEL;
		else
			reg_val &= ~SPI_CMD_SAMPLE_SEL;
	}

	/* always enable the finish and pause interrupts */
	reg_val |= SPI_CMD_FINISH_IE | SPI_CMD_PAUSE_IE;

	/* disable dma mode */
	reg_val &= ~(SPI_CMD_TX_DMA | SPI_CMD_RX_DMA);

	/* disable deassert mode */
	reg_val &= ~SPI_CMD_DEASSERT;

	writel(reg_val, mdata->base + SPI_CMD_REG);

	/* pad select */
	if (mdata->dev_comp->need_pad_sel)
		writel(mdata->pad_sel[spi_get_chipselect(spi, 0)],
		       mdata->base + SPI_PAD_SEL_REG);

	/* tick delay */
	if (mdata->dev_comp->enhance_timing) {
		if (mdata->dev_comp->ipm_design) {
			reg_val = readl(mdata->base + SPI_CMD_REG);
			reg_val &= ~SPI_CMD_IPM_GET_TICKDLY_MASK;
			reg_val |= ((chip_config->tick_delay & 0x7)
				    << SPI_CMD_IPM_GET_TICKDLY_OFFSET);
			writel(reg_val, mdata->base + SPI_CMD_REG);
		} else {
			reg_val = readl(mdata->base + SPI_CFG1_REG);
			reg_val &= ~SPI_CFG1_GET_TICK_DLY_MASK;
			reg_val |= ((chip_config->tick_delay & 0x7)
				    << SPI_CFG1_GET_TICK_DLY_OFFSET);
			writel(reg_val, mdata->base + SPI_CFG1_REG);
		}
	} else {
		reg_val = readl(mdata->base + SPI_CFG1_REG);
		reg_val &= ~SPI_CFG1_GET_TICK_DLY_MASK_V1;
		reg_val |= ((chip_config->tick_delay & 0x3)
			    << SPI_CFG1_GET_TICK_DLY_OFFSET_V1);
		writel(reg_val, mdata->base + SPI_CFG1_REG);
	}

	/* set hw cs timing */
	mtk_spi_set_hw_cs_timing(spi);
	return 0;
}

static int mtk_spi_prepare_message(struct spi_controller *host,
				   struct spi_message *msg)
{
	return mtk_spi_hw_init(host, msg->spi);
}

static int mtk_spi_unprepare_message(struct spi_controller *host,
				     struct spi_message *message)
{
	struct mtk_spi *mdata = spi_controller_get_devdata(host);

	cpu_latency_qos_update_request(&mdata->qos_request, PM_QOS_DEFAULT_VALUE);
	return 0;
}

static void mtk_spi_set_cs(struct spi_device *spi, bool enable)
{
	u32 reg_val;
	struct mtk_spi *mdata = spi_controller_get_devdata(spi->controller);

	if (spi->mode & SPI_CS_HIGH)
		enable = !enable;

	reg_val = readl(mdata->base + SPI_CMD_REG);
	if (!enable) {
		reg_val |= SPI_CMD_PAUSE_EN;
		writel(reg_val, mdata->base + SPI_CMD_REG);
	} else {
		reg_val &= ~SPI_CMD_PAUSE_EN;
		writel(reg_val, mdata->base + SPI_CMD_REG);
		mdata->state = MTK_SPI_IDLE;
		mtk_spi_reset(mdata);
	}
}

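/*
 * Derive SCK from spi_clk by an integer divider; sck_time is the
 * half-period in clock cycles. For example, spi_clk_hz = 100 MHz and
 * speed_hz = 10 MHz gives div = 10 and sck_time = 5, i.e. SCK is high
 * for 5 cycles and low for 5 cycles. Requests at or above half the
 * source clock fall back to div = 1.
 */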
static void mtk_spi_prepare_transfer(struct spi_controller *host,
				     u32 speed_hz)
{
	u32 div, sck_time, reg_val;
	struct mtk_spi *mdata = spi_controller_get_devdata(host);

	if (speed_hz < mdata->spi_clk_hz / 2)
		div = DIV_ROUND_UP(mdata->spi_clk_hz, speed_hz);
	else
		div = 1;

	sck_time = (div + 1) / 2;

	if (mdata->dev_comp->enhance_timing) {
		reg_val = readl(mdata->base + SPI_CFG2_REG);
		reg_val &= ~(0xffff << SPI_CFG2_SCK_HIGH_OFFSET);
		reg_val |= (((sck_time - 1) & 0xffff)
			   << SPI_CFG2_SCK_HIGH_OFFSET);
		reg_val &= ~(0xffff << SPI_CFG2_SCK_LOW_OFFSET);
		reg_val |= (((sck_time - 1) & 0xffff)
			   << SPI_CFG2_SCK_LOW_OFFSET);
		writel(reg_val, mdata->base + SPI_CFG2_REG);
	} else {
		reg_val = readl(mdata->base + SPI_CFG0_REG);
		reg_val &= ~(0xff << SPI_CFG0_SCK_HIGH_OFFSET);
		reg_val |= (((sck_time - 1) & 0xff)
			   << SPI_CFG0_SCK_HIGH_OFFSET);
		reg_val &= ~(0xff << SPI_CFG0_SCK_LOW_OFFSET);
		reg_val |= (((sck_time - 1) & 0xff) << SPI_CFG0_SCK_LOW_OFFSET);
		writel(reg_val, mdata->base + SPI_CFG0_REG);
	}
}

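/*
 * A leg of a transfer is programmed as packet_loop repetitions of
 * packet_size bytes, so xfer_len must be a whole multiple of
 * packet_size here; mtk_spi_update_mdata_len() splits any remainder
 * off beforehand. For example, a 2500-byte DMA transfer on non-IPM
 * hardware runs as 2 x 1024 bytes first, with the trailing 452 bytes
 * issued as a follow-up round from the interrupt thread.
 */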
static void mtk_spi_setup_packet(struct spi_controller *host)
{
	u32 packet_size, packet_loop, reg_val;
	struct mtk_spi *mdata = spi_controller_get_devdata(host);

	if (mdata->dev_comp->ipm_design)
		packet_size = min_t(u32,
				    mdata->xfer_len,
				    MTK_SPI_IPM_PACKET_SIZE);
	else
		packet_size = min_t(u32,
				    mdata->xfer_len,
				    MTK_SPI_PACKET_SIZE);

	packet_loop = mdata->xfer_len / packet_size;

	reg_val = readl(mdata->base + SPI_CFG1_REG);
	if (mdata->dev_comp->ipm_design)
		reg_val &= ~SPI_CFG1_IPM_PACKET_LENGTH_MASK;
	else
		reg_val &= ~SPI_CFG1_PACKET_LENGTH_MASK;
	reg_val |= (packet_size - 1) << SPI_CFG1_PACKET_LENGTH_OFFSET;
	reg_val &= ~SPI_CFG1_PACKET_LOOP_MASK;
	reg_val |= (packet_loop - 1) << SPI_CFG1_PACKET_LOOP_OFFSET;
	writel(reg_val, mdata->base + SPI_CFG1_REG);
}

static void mtk_spi_enable_transfer(struct spi_controller *host)
{
	u32 cmd;
	struct mtk_spi *mdata = spi_controller_get_devdata(host);

	cmd = readl(mdata->base + SPI_CMD_REG);
	if (mdata->state == MTK_SPI_IDLE)
		cmd |= SPI_CMD_ACT;
	else
		cmd |= SPI_CMD_RESUME;
	writel(cmd, mdata->base + SPI_CMD_REG);
}

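/*
 * The tail of a scatterlist entry that does not fill a whole number of
 * packets ("mult_delta") is held back so that the current DMA round
 * stays packet-aligned; the remainder is transferred in a later round.
 */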
static int mtk_spi_get_mult_delta(struct mtk_spi *mdata, u32 xfer_len)
{
	u32 mult_delta = 0;

	if (mdata->dev_comp->ipm_design) {
		if (xfer_len > MTK_SPI_IPM_PACKET_SIZE)
			mult_delta = xfer_len % MTK_SPI_IPM_PACKET_SIZE;
	} else {
		if (xfer_len > MTK_SPI_PACKET_SIZE)
			mult_delta = xfer_len % MTK_SPI_PACKET_SIZE;
	}

	return mult_delta;
}

static void mtk_spi_update_mdata_len(struct spi_controller *host)
{
	int mult_delta;
	struct mtk_spi *mdata = spi_controller_get_devdata(host);

	if (mdata->tx_sgl_len && mdata->rx_sgl_len) {
		if (mdata->tx_sgl_len > mdata->rx_sgl_len) {
			mult_delta = mtk_spi_get_mult_delta(mdata, mdata->rx_sgl_len);
			mdata->xfer_len = mdata->rx_sgl_len - mult_delta;
			mdata->rx_sgl_len = mult_delta;
			mdata->tx_sgl_len -= mdata->xfer_len;
		} else {
			mult_delta = mtk_spi_get_mult_delta(mdata, mdata->tx_sgl_len);
			mdata->xfer_len = mdata->tx_sgl_len - mult_delta;
			mdata->tx_sgl_len = mult_delta;
			mdata->rx_sgl_len -= mdata->xfer_len;
		}
	} else if (mdata->tx_sgl_len) {
		mult_delta = mtk_spi_get_mult_delta(mdata, mdata->tx_sgl_len);
		mdata->xfer_len = mdata->tx_sgl_len - mult_delta;
		mdata->tx_sgl_len = mult_delta;
	} else if (mdata->rx_sgl_len) {
		mult_delta = mtk_spi_get_mult_delta(mdata, mdata->rx_sgl_len);
		mdata->xfer_len = mdata->rx_sgl_len - mult_delta;
		mdata->rx_sgl_len = mult_delta;
	}
}

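/*
 * Program the DMA source/destination registers. Controllers with
 * dma_ext support 36-bit addresses, split as bits 31:0 into
 * SPI_TX_SRC_REG/SPI_RX_DST_REG and bits 35:32 into the _64 variants.
 */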
static void mtk_spi_setup_dma_addr(struct spi_controller *host,
				   struct spi_transfer *xfer)
{
	struct mtk_spi *mdata = spi_controller_get_devdata(host);

	if (mdata->tx_sgl) {
		writel((u32)(xfer->tx_dma & MTK_SPI_32BITS_MASK),
		       mdata->base + SPI_TX_SRC_REG);
#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
		if (mdata->dev_comp->dma_ext)
			writel((u32)(xfer->tx_dma >> 32),
			       mdata->base + SPI_TX_SRC_REG_64);
#endif
	}

	if (mdata->rx_sgl) {
		writel((u32)(xfer->rx_dma & MTK_SPI_32BITS_MASK),
		       mdata->base + SPI_RX_DST_REG);
#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
		if (mdata->dev_comp->dma_ext)
			writel((u32)(xfer->rx_dma >> 32),
			       mdata->base + SPI_RX_DST_REG_64);
#endif
	}
}

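/*
 * FIFO mode: data moves through the 32-byte FIFO one 32-bit word at a
 * time, with a trailing 1-3 bytes packed into a final word. Transfers
 * longer than the FIFO continue in MTK_SPI_MAX_FIFO_SIZE chunks from
 * the interrupt thread.
 */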
static int mtk_spi_fifo_transfer(struct spi_controller *host,
				 struct spi_device *spi,
				 struct spi_transfer *xfer)
{
	int cnt, remainder;
	u32 reg_val;
	struct mtk_spi *mdata = spi_controller_get_devdata(host);

	mdata->cur_transfer = xfer;
	mdata->xfer_len = min(MTK_SPI_MAX_FIFO_SIZE, xfer->len);
	mdata->num_xfered = 0;
	mtk_spi_prepare_transfer(host, xfer->speed_hz);
	mtk_spi_setup_packet(host);

	if (xfer->tx_buf) {
		/* fill at most one FIFO chunk; the IRQ thread writes the rest */
		cnt = mdata->xfer_len / 4;
		iowrite32_rep(mdata->base + SPI_TX_DATA_REG, xfer->tx_buf, cnt);
		remainder = mdata->xfer_len % 4;
		if (remainder > 0) {
			reg_val = 0;
			memcpy(&reg_val, xfer->tx_buf + (cnt * 4), remainder);
			writel(reg_val, mdata->base + SPI_TX_DATA_REG);
		}
	}

	mtk_spi_enable_transfer(host);

	return 1;
}

static int mtk_spi_dma_transfer(struct spi_controller *host,
				struct spi_device *spi,
				struct spi_transfer *xfer)
{
	int cmd;
	struct mtk_spi *mdata = spi_controller_get_devdata(host);

	mdata->tx_sgl = NULL;
	mdata->rx_sgl = NULL;
	mdata->tx_sgl_len = 0;
	mdata->rx_sgl_len = 0;
	mdata->cur_transfer = xfer;
	mdata->num_xfered = 0;

	mtk_spi_prepare_transfer(host, xfer->speed_hz);

	cmd = readl(mdata->base + SPI_CMD_REG);
	if (xfer->tx_buf)
		cmd |= SPI_CMD_TX_DMA;
	if (xfer->rx_buf)
		cmd |= SPI_CMD_RX_DMA;
	writel(cmd, mdata->base + SPI_CMD_REG);

	if (xfer->tx_buf)
		mdata->tx_sgl = xfer->tx_sg.sgl;
	if (xfer->rx_buf)
		mdata->rx_sgl = xfer->rx_sg.sgl;

	if (mdata->tx_sgl) {
		xfer->tx_dma = sg_dma_address(mdata->tx_sgl);
		mdata->tx_sgl_len = sg_dma_len(mdata->tx_sgl);
	}
	if (mdata->rx_sgl) {
		xfer->rx_dma = sg_dma_address(mdata->rx_sgl);
		mdata->rx_sgl_len = sg_dma_len(mdata->rx_sgl);
	}

	mtk_spi_update_mdata_len(host);
	mtk_spi_setup_packet(host);
	mtk_spi_setup_dma_addr(host, xfer);
	mtk_spi_enable_transfer(host);

	return 1;
}

static int mtk_spi_transfer_one(struct spi_controller *host,
				struct spi_device *spi,
				struct spi_transfer *xfer)
{
	struct mtk_spi *mdata = spi_controller_get_devdata(spi->controller);
	u32 reg_val = 0;

	/* prepare xfer direction and duplex mode */
	if (mdata->dev_comp->ipm_design) {
		if (!xfer->tx_buf || !xfer->rx_buf) {
			reg_val |= SPI_CFG3_IPM_HALF_DUPLEX_EN;
			if (xfer->rx_buf)
				reg_val |= SPI_CFG3_IPM_HALF_DUPLEX_DIR;
		}
		writel(reg_val, mdata->base + SPI_CFG3_IPM_REG);
	}

	if (host->can_dma(host, spi, xfer))
		return mtk_spi_dma_transfer(host, spi, xfer);
	else
		return mtk_spi_fifo_transfer(host, spi, xfer);
}

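/*
 * Note that a NULL tx_buf or rx_buf (half-duplex transfer) also passes
 * the alignment checks below, since (unsigned long)NULL % 4 == 0.
 */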
static bool mtk_spi_can_dma(struct spi_controller *host,
			    struct spi_device *spi,
			    struct spi_transfer *xfer)
{
	/* Buffers for DMA transactions must be 4-byte aligned */
	return (xfer->len > MTK_SPI_MAX_FIFO_SIZE &&
		(unsigned long)xfer->tx_buf % 4 == 0 &&
		(unsigned long)xfer->rx_buf % 4 == 0);
}

static int mtk_spi_setup(struct spi_device *spi)
{
	struct mtk_spi *mdata = spi_controller_get_devdata(spi->controller);

	if (!spi->controller_data)
		spi->controller_data = (void *)&mtk_default_chip_info;

	if (mdata->dev_comp->need_pad_sel && spi_get_csgpiod(spi, 0))
		/* CS de-asserted, gpiolib will handle inversion */
		gpiod_direction_output(spi_get_csgpiod(spi, 0), 0);

	return 0;
}

static irqreturn_t mtk_spi_interrupt_thread(int irq, void *dev_id)
{
	u32 cmd, reg_val, cnt, remainder, len;
	struct spi_controller *host = dev_id;
	struct mtk_spi *mdata = spi_controller_get_devdata(host);
	struct spi_transfer *xfer = mdata->cur_transfer;

	if (!host->can_dma(host, NULL, xfer)) {
		if (xfer->rx_buf) {
			cnt = mdata->xfer_len / 4;
			ioread32_rep(mdata->base + SPI_RX_DATA_REG,
				     xfer->rx_buf + mdata->num_xfered, cnt);
			remainder = mdata->xfer_len % 4;
			if (remainder > 0) {
				reg_val = readl(mdata->base + SPI_RX_DATA_REG);
				memcpy(xfer->rx_buf + (cnt * 4) + mdata->num_xfered,
					&reg_val,
					remainder);
			}
		}

		mdata->num_xfered += mdata->xfer_len;
		if (mdata->num_xfered == xfer->len) {
			spi_finalize_current_transfer(host);
			return IRQ_HANDLED;
		}

		len = xfer->len - mdata->num_xfered;
		mdata->xfer_len = min(MTK_SPI_MAX_FIFO_SIZE, len);
		mtk_spi_setup_packet(host);

		if (xfer->tx_buf) {
			cnt = mdata->xfer_len / 4;
			iowrite32_rep(mdata->base + SPI_TX_DATA_REG,
					xfer->tx_buf + mdata->num_xfered, cnt);

			remainder = mdata->xfer_len % 4;
			if (remainder > 0) {
				reg_val = 0;
				memcpy(&reg_val,
					xfer->tx_buf + (cnt * 4) + mdata->num_xfered,
					remainder);
				writel(reg_val, mdata->base + SPI_TX_DATA_REG);
			}
		}

		mtk_spi_enable_transfer(host);

		return IRQ_HANDLED;
	}

	if (mdata->tx_sgl)
		xfer->tx_dma += mdata->xfer_len;
	if (mdata->rx_sgl)
		xfer->rx_dma += mdata->xfer_len;

	if (mdata->tx_sgl && (mdata->tx_sgl_len == 0)) {
		mdata->tx_sgl = sg_next(mdata->tx_sgl);
		if (mdata->tx_sgl) {
			xfer->tx_dma = sg_dma_address(mdata->tx_sgl);
			mdata->tx_sgl_len = sg_dma_len(mdata->tx_sgl);
		}
	}
	if (mdata->rx_sgl && (mdata->rx_sgl_len == 0)) {
		mdata->rx_sgl = sg_next(mdata->rx_sgl);
		if (mdata->rx_sgl) {
			xfer->rx_dma = sg_dma_address(mdata->rx_sgl);
			mdata->rx_sgl_len = sg_dma_len(mdata->rx_sgl);
		}
	}

	if (!mdata->tx_sgl && !mdata->rx_sgl) {
		/* all scatterlist segments done; disable DMA mode */
		cmd = readl(mdata->base + SPI_CMD_REG);
		cmd &= ~SPI_CMD_TX_DMA;
		cmd &= ~SPI_CMD_RX_DMA;
		writel(cmd, mdata->base + SPI_CMD_REG);

		spi_finalize_current_transfer(host);
		return IRQ_HANDLED;
	}

	mtk_spi_update_mdata_len(host);
	mtk_spi_setup_packet(host);
	mtk_spi_setup_dma_addr(host, xfer);
	mtk_spi_enable_transfer(host);

	return IRQ_HANDLED;
}

static irqreturn_t mtk_spi_interrupt(int irq, void *dev_id)
{
	struct spi_controller *host = dev_id;
	struct mtk_spi *mdata = spi_controller_get_devdata(host);
	u32 reg_val;

	reg_val = readl(mdata->base + SPI_STATUS0_REG);
	if (reg_val & MTK_SPI_PAUSE_INT_STATUS)
		mdata->state = MTK_SPI_PAUSED;
	else
		mdata->state = MTK_SPI_IDLE;

	/* SPI-MEM ops */
	if (mdata->use_spimem) {
		complete(&mdata->spimem_done);
		return IRQ_HANDLED;
	}

	return IRQ_WAKE_THREAD;
}

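/*
 * Clamp an operation so opcode + address + dummy + data fits in one
 * IPM packet. For example, with a 1-byte opcode, 3 address bytes and
 * 1 dummy byte, the data is capped at 65536 - 5 = 65531 bytes and then
 * rounded down to 65528 to stay 4-byte aligned.
 */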
static int mtk_spi_mem_adjust_op_size(struct spi_mem *mem,
				      struct spi_mem_op *op)
{
	int opcode_len;

	if (op->data.dir != SPI_MEM_NO_DATA) {
		opcode_len = 1 + op->addr.nbytes + op->dummy.nbytes;
		if (opcode_len + op->data.nbytes > MTK_SPI_IPM_PACKET_SIZE) {
			op->data.nbytes = MTK_SPI_IPM_PACKET_SIZE - opcode_len;
			/* round down to keep the data length 4-byte aligned for DMA */
			op->data.nbytes -= op->data.nbytes % 4;
		}
	}

	return 0;
}

static bool mtk_spi_mem_supports_op(struct spi_mem *mem,
				    const struct spi_mem_op *op)
{
	if (!spi_mem_default_supports_op(mem, op))
		return false;

	if (op->addr.nbytes && op->dummy.nbytes &&
	    op->addr.buswidth != op->dummy.buswidth)
		return false;

	if (op->addr.nbytes + op->dummy.nbytes > 16)
		return false;

	if (op->data.nbytes > MTK_SPI_IPM_PACKET_SIZE) {
		if (op->data.nbytes / MTK_SPI_IPM_PACKET_SIZE >
		    MTK_SPI_IPM_PACKET_LOOP ||
		    op->data.nbytes % MTK_SPI_IPM_PACKET_SIZE != 0)
			return false;
	}

	return true;
}

static void mtk_spi_mem_setup_dma_xfer(struct spi_controller *host,
				       const struct spi_mem_op *op)
{
	struct mtk_spi *mdata = spi_controller_get_devdata(host);

	writel((u32)(mdata->tx_dma & MTK_SPI_32BITS_MASK),
	       mdata->base + SPI_TX_SRC_REG);
#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
	if (mdata->dev_comp->dma_ext)
		writel((u32)(mdata->tx_dma >> 32),
		       mdata->base + SPI_TX_SRC_REG_64);
#endif

	if (op->data.dir == SPI_MEM_DATA_IN) {
		writel((u32)(mdata->rx_dma & MTK_SPI_32BITS_MASK),
		       mdata->base + SPI_RX_DST_REG);
#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
		if (mdata->dev_comp->dma_ext)
			writel((u32)(mdata->rx_dma >> 32),
			       mdata->base + SPI_RX_DST_REG_64);
#endif
	}
}

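/*
 * Timeout budget: 8 SPI clock cycles per byte, scaled to milliseconds,
 * then doubled with 1 s of slack. For example, 4096 bytes at a 1 MHz
 * max speed gives 8000 * 4096 / 1000000 = 32 ms, padded to
 * 2 * 32 + 1000 = 1064 ms.
 */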
static int mtk_spi_transfer_wait(struct spi_mem *mem,
				 const struct spi_mem_op *op)
{
	struct mtk_spi *mdata = spi_controller_get_devdata(mem->spi->controller);
	/*
	 * Each byte needs 8 cycles of the SPI clock; since the speed is
	 * given in Hz and the timeout in milliseconds, scale by 8 * 1000.
	 */
	u64 ms = 8000LL;

	if (op->data.dir == SPI_MEM_NO_DATA)
		ms *= 32; /* avoid a near-zero timeout for data-less ops */
	else
		ms *= op->data.nbytes;
	ms = div_u64(ms, mem->spi->max_speed_hz);
	ms += ms + 1000; /* double the estimate and add 1s tolerance */

	if (ms > UINT_MAX)
		ms = UINT_MAX;

	if (!wait_for_completion_timeout(&mdata->spimem_done,
					 msecs_to_jiffies(ms))) {
		dev_err(mdata->dev, "spi-mem transfer timeout\n");
		return -ETIMEDOUT;
	}

	return 0;
}

static int mtk_spi_mem_exec_op(struct spi_mem *mem,
			       const struct spi_mem_op *op)
{
	struct mtk_spi *mdata = spi_controller_get_devdata(mem->spi->controller);
	u32 reg_val, nio, tx_size;
	char *tx_tmp_buf, *rx_tmp_buf;
	int ret = 0;

	mdata->use_spimem = true;
	reinit_completion(&mdata->spimem_done);

	mtk_spi_reset(mdata);
	mtk_spi_hw_init(mem->spi->controller, mem->spi);
	mtk_spi_prepare_transfer(mem->spi->controller, op->max_freq);

	reg_val = readl(mdata->base + SPI_CFG3_IPM_REG);
	/* opcode byte len */
	reg_val &= ~SPI_CFG3_IPM_CMD_BYTELEN_MASK;
	reg_val |= 1 << SPI_CFG3_IPM_CMD_BYTELEN_OFFSET;

	/* addr & dummy byte len */
	reg_val &= ~SPI_CFG3_IPM_ADDR_BYTELEN_MASK;
	if (op->addr.nbytes || op->dummy.nbytes)
		reg_val |= (op->addr.nbytes + op->dummy.nbytes) <<
			    SPI_CFG3_IPM_ADDR_BYTELEN_OFFSET;

	/* data byte len */
	if (op->data.dir == SPI_MEM_NO_DATA) {
		reg_val |= SPI_CFG3_IPM_NODATA_FLAG;
		writel(0, mdata->base + SPI_CFG1_REG);
	} else {
		reg_val &= ~SPI_CFG3_IPM_NODATA_FLAG;
		mdata->xfer_len = op->data.nbytes;
		mtk_spi_setup_packet(mem->spi->controller);
	}

	if (op->addr.nbytes || op->dummy.nbytes) {
		if (op->addr.buswidth == 1 || op->dummy.buswidth == 1)
			reg_val |= SPI_CFG3_IPM_XMODE_EN;
		else
			reg_val &= ~SPI_CFG3_IPM_XMODE_EN;
	}

	if (op->addr.buswidth == 2 ||
	    op->dummy.buswidth == 2 ||
	    op->data.buswidth == 2)
		nio = 2;
	else if (op->addr.buswidth == 4 ||
		 op->dummy.buswidth == 4 ||
		 op->data.buswidth == 4)
		nio = 4;
	else
		nio = 1;

	reg_val &= ~SPI_CFG3_IPM_CMD_PIN_MODE_MASK;
	reg_val |= PIN_MODE_CFG(nio);

	reg_val |= SPI_CFG3_IPM_HALF_DUPLEX_EN;
	if (op->data.dir == SPI_MEM_DATA_IN)
		reg_val |= SPI_CFG3_IPM_HALF_DUPLEX_DIR;
	else
		reg_val &= ~SPI_CFG3_IPM_HALF_DUPLEX_DIR;
	writel(reg_val, mdata->base + SPI_CFG3_IPM_REG);

	tx_size = 1 + op->addr.nbytes + op->dummy.nbytes;
	if (op->data.dir == SPI_MEM_DATA_OUT)
		tx_size += op->data.nbytes;

	tx_size = max_t(u32, tx_size, 32);

	tx_tmp_buf = kzalloc(tx_size, GFP_KERNEL | GFP_DMA);
	if (!tx_tmp_buf) {
		mdata->use_spimem = false;
		return -ENOMEM;
	}

	tx_tmp_buf[0] = op->cmd.opcode;

	if (op->addr.nbytes) {
		int i;

		for (i = 0; i < op->addr.nbytes; i++)
			tx_tmp_buf[i + 1] = op->addr.val >>
					(8 * (op->addr.nbytes - i - 1));
	}

	if (op->dummy.nbytes)
		memset(tx_tmp_buf + op->addr.nbytes + 1,
		       0xff,
		       op->dummy.nbytes);

	if (op->data.nbytes && op->data.dir == SPI_MEM_DATA_OUT)
		memcpy(tx_tmp_buf + op->dummy.nbytes + op->addr.nbytes + 1,
		       op->data.buf.out,
		       op->data.nbytes);

	mdata->tx_dma = dma_map_single(mdata->dev, tx_tmp_buf,
				       tx_size, DMA_TO_DEVICE);
	if (dma_mapping_error(mdata->dev, mdata->tx_dma)) {
		ret = -ENOMEM;
		goto err_exit;
	}

	if (op->data.dir == SPI_MEM_DATA_IN) {
		if (!IS_ALIGNED((size_t)op->data.buf.in, 4)) {
			rx_tmp_buf = kzalloc(op->data.nbytes,
					     GFP_KERNEL | GFP_DMA);
			if (!rx_tmp_buf) {
				ret = -ENOMEM;
				goto unmap_tx_dma;
			}
		} else {
			rx_tmp_buf = op->data.buf.in;
		}

		mdata->rx_dma = dma_map_single(mdata->dev,
					       rx_tmp_buf,
					       op->data.nbytes,
					       DMA_FROM_DEVICE);
		if (dma_mapping_error(mdata->dev, mdata->rx_dma)) {
			ret = -ENOMEM;
			goto kfree_rx_tmp_buf;
		}
	}

	reg_val = readl(mdata->base + SPI_CMD_REG);
	reg_val |= SPI_CMD_TX_DMA;
	if (op->data.dir == SPI_MEM_DATA_IN)
		reg_val |= SPI_CMD_RX_DMA;
	writel(reg_val, mdata->base + SPI_CMD_REG);

	mtk_spi_mem_setup_dma_xfer(mem->spi->controller, op);

	mtk_spi_enable_transfer(mem->spi->controller);

	/* Wait for the interrupt. */
	ret = mtk_spi_transfer_wait(mem, op);
	if (ret)
		goto unmap_rx_dma;

	/* transfer finished; disable DMA mode again */
	reg_val = readl(mdata->base + SPI_CMD_REG);
	reg_val &= ~SPI_CMD_TX_DMA;
	if (op->data.dir == SPI_MEM_DATA_IN)
		reg_val &= ~SPI_CMD_RX_DMA;
	writel(reg_val, mdata->base + SPI_CMD_REG);

unmap_rx_dma:
	if (op->data.dir == SPI_MEM_DATA_IN) {
		dma_unmap_single(mdata->dev, mdata->rx_dma,
				 op->data.nbytes, DMA_FROM_DEVICE);
		if (!IS_ALIGNED((size_t)op->data.buf.in, 4))
			memcpy(op->data.buf.in, rx_tmp_buf, op->data.nbytes);
	}
kfree_rx_tmp_buf:
	if (op->data.dir == SPI_MEM_DATA_IN &&
	    !IS_ALIGNED((size_t)op->data.buf.in, 4))
		kfree(rx_tmp_buf);
unmap_tx_dma:
	dma_unmap_single(mdata->dev, mdata->tx_dma,
			 tx_size, DMA_TO_DEVICE);
err_exit:
	kfree(tx_tmp_buf);
	mdata->use_spimem = false;

	return ret;
}

static const struct spi_controller_mem_ops mtk_spi_mem_ops = {
	.adjust_op_size = mtk_spi_mem_adjust_op_size,
	.supports_op = mtk_spi_mem_supports_op,
	.exec_op = mtk_spi_mem_exec_op,
};

static const struct spi_controller_mem_caps mtk_spi_mem_caps = {
	.per_op_freq = true,
};

static int mtk_spi_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct spi_controller *host;
	struct mtk_spi *mdata;
	int i, irq, ret, addr_bits;

	host = devm_spi_alloc_host(dev, sizeof(*mdata));
	if (!host)
		return dev_err_probe(dev, -ENOMEM, "failed to alloc spi host\n");

	host->auto_runtime_pm = true;
	host->dev.of_node = dev->of_node;
	host->mode_bits = SPI_CPOL | SPI_CPHA | SPI_LSB_FIRST;

	host->set_cs = mtk_spi_set_cs;
	host->prepare_message = mtk_spi_prepare_message;
	host->unprepare_message = mtk_spi_unprepare_message;
	host->transfer_one = mtk_spi_transfer_one;
	host->can_dma = mtk_spi_can_dma;
	host->setup = mtk_spi_setup;
	host->set_cs_timing = mtk_spi_set_hw_cs_timing;
	host->use_gpio_descriptors = true;

	mdata = spi_controller_get_devdata(host);
	mdata->dev_comp = device_get_match_data(dev);

	if (mdata->dev_comp->enhance_timing)
		host->mode_bits |= SPI_CS_HIGH;

	if (mdata->dev_comp->must_tx)
		host->flags = SPI_CONTROLLER_MUST_TX;
	if (mdata->dev_comp->ipm_design)
		host->mode_bits |= SPI_LOOP | SPI_RX_DUAL | SPI_TX_DUAL |
				   SPI_RX_QUAD | SPI_TX_QUAD;

	if (mdata->dev_comp->ipm_design) {
		mdata->dev = dev;
		host->mem_ops = &mtk_spi_mem_ops;
		host->mem_caps = &mtk_spi_mem_caps;
		init_completion(&mdata->spimem_done);
	}

	if (mdata->dev_comp->need_pad_sel) {
		mdata->pad_num = of_property_count_u32_elems(dev->of_node,
			"mediatek,pad-select");
		if (mdata->pad_num < 0)
			return dev_err_probe(dev, -EINVAL,
				"No 'mediatek,pad-select' property\n");

		mdata->pad_sel = devm_kmalloc_array(dev, mdata->pad_num,
						    sizeof(u32), GFP_KERNEL);
		if (!mdata->pad_sel)
			return -ENOMEM;

		for (i = 0; i < mdata->pad_num; i++) {
			of_property_read_u32_index(dev->of_node,
						   "mediatek,pad-select",
						   i, &mdata->pad_sel[i]);
			if (mdata->pad_sel[i] > MT8173_SPI_MAX_PAD_SEL)
				return dev_err_probe(dev, -EINVAL,
						     "wrong pad-sel[%d]: %u\n",
						     i, mdata->pad_sel[i]);
		}
	}

	platform_set_drvdata(pdev, host);
	mdata->base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(mdata->base))
		return PTR_ERR(mdata->base);

	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return irq;

	if (!dev->dma_mask)
		dev->dma_mask = &dev->coherent_dma_mask;

	if (mdata->dev_comp->ipm_design)
		dma_set_max_seg_size(dev, SZ_16M);
	else
		dma_set_max_seg_size(dev, SZ_256K);

	mdata->parent_clk = devm_clk_get(dev, "parent-clk");
	if (IS_ERR(mdata->parent_clk))
		return dev_err_probe(dev, PTR_ERR(mdata->parent_clk),
				     "failed to get parent-clk\n");

	mdata->sel_clk = devm_clk_get(dev, "sel-clk");
	if (IS_ERR(mdata->sel_clk))
		return dev_err_probe(dev, PTR_ERR(mdata->sel_clk), "failed to get sel-clk\n");

	mdata->spi_clk = devm_clk_get(dev, "spi-clk");
	if (IS_ERR(mdata->spi_clk))
		return dev_err_probe(dev, PTR_ERR(mdata->spi_clk), "failed to get spi-clk\n");

	mdata->spi_hclk = devm_clk_get_optional(dev, "hclk");
	if (IS_ERR(mdata->spi_hclk))
		return dev_err_probe(dev, PTR_ERR(mdata->spi_hclk), "failed to get hclk\n");

	ret = clk_set_parent(mdata->sel_clk, mdata->parent_clk);
	if (ret < 0)
		return dev_err_probe(dev, ret, "failed to clk_set_parent\n");

	ret = clk_prepare_enable(mdata->spi_hclk);
	if (ret < 0)
		return dev_err_probe(dev, ret, "failed to enable hclk\n");

	ret = clk_prepare_enable(mdata->spi_clk);
	if (ret < 0) {
		clk_disable_unprepare(mdata->spi_hclk);
		return dev_err_probe(dev, ret, "failed to enable spi_clk\n");
	}

	mdata->spi_clk_hz = clk_get_rate(mdata->spi_clk);

	if (mdata->dev_comp->no_need_unprepare) {
		clk_disable(mdata->spi_clk);
		clk_disable(mdata->spi_hclk);
	} else {
		clk_disable_unprepare(mdata->spi_clk);
		clk_disable_unprepare(mdata->spi_hclk);
	}

	cpu_latency_qos_add_request(&mdata->qos_request, PM_QOS_DEFAULT_VALUE);

	if (mdata->dev_comp->need_pad_sel) {
		if (mdata->pad_num != host->num_chipselect)
			return dev_err_probe(dev, -EINVAL,
				"pad_num does not match num_chipselect(%d != %d)\n",
				mdata->pad_num, host->num_chipselect);

		if (!host->cs_gpiods && host->num_chipselect > 1)
			return dev_err_probe(dev, -EINVAL,
				"cs_gpios not specified and num_chipselect > 1\n");
	}

	if (mdata->dev_comp->dma_ext)
		addr_bits = DMA_ADDR_EXT_BITS;
	else
		addr_bits = DMA_ADDR_DEF_BITS;
	ret = dma_set_mask(dev, DMA_BIT_MASK(addr_bits));
	if (ret)
		dev_notice(dev, "SPI dma_set_mask(%d) failed, ret:%d\n",
			   addr_bits, ret);

	ret = devm_request_threaded_irq(dev, irq, mtk_spi_interrupt,
					mtk_spi_interrupt_thread,
					IRQF_TRIGGER_NONE, dev_name(dev), host);
	if (ret)
		return dev_err_probe(dev, ret, "failed to register irq\n");

	pm_runtime_enable(dev);

	ret = devm_spi_register_controller(dev, host);
	if (ret) {
		pm_runtime_disable(dev);
		return dev_err_probe(dev, ret, "failed to register host\n");
	}

	return 0;
}

static void mtk_spi_remove(struct platform_device *pdev)
{
	struct spi_controller *host = platform_get_drvdata(pdev);
	struct mtk_spi *mdata = spi_controller_get_devdata(host);
	int ret;

	cpu_latency_qos_remove_request(&mdata->qos_request);
	if (mdata->use_spimem && !completion_done(&mdata->spimem_done))
		complete(&mdata->spimem_done);

	ret = pm_runtime_get_sync(&pdev->dev);
	if (ret < 0) {
		dev_warn(&pdev->dev, "Failed to resume hardware (%pe)\n", ERR_PTR(ret));
	} else {
		/*
		 * If pm runtime resume failed, clks are disabled and
		 * unprepared. So don't access the hardware and skip clk
		 * unpreparing.
		 */
		mtk_spi_reset(mdata);

		if (mdata->dev_comp->no_need_unprepare) {
			clk_unprepare(mdata->spi_clk);
			clk_unprepare(mdata->spi_hclk);
		}
	}

	pm_runtime_put_noidle(&pdev->dev);
	pm_runtime_disable(&pdev->dev);
}

#ifdef CONFIG_PM_SLEEP
static int mtk_spi_suspend(struct device *dev)
{
	int ret;
	struct spi_controller *host = dev_get_drvdata(dev);
	struct mtk_spi *mdata = spi_controller_get_devdata(host);

	ret = spi_controller_suspend(host);
	if (ret)
		return ret;

	if (!pm_runtime_suspended(dev)) {
		clk_disable_unprepare(mdata->spi_clk);
		clk_disable_unprepare(mdata->spi_hclk);
	}

	pinctrl_pm_select_sleep_state(dev);

	return 0;
}

static int mtk_spi_resume(struct device *dev)
{
	int ret;
	struct spi_controller *host = dev_get_drvdata(dev);
	struct mtk_spi *mdata = spi_controller_get_devdata(host);

	pinctrl_pm_select_default_state(dev);

	if (!pm_runtime_suspended(dev)) {
		ret = clk_prepare_enable(mdata->spi_clk);
		if (ret < 0) {
			dev_err(dev, "failed to enable spi_clk (%d)\n", ret);
			return ret;
		}

		ret = clk_prepare_enable(mdata->spi_hclk);
		if (ret < 0) {
			dev_err(dev, "failed to enable spi_hclk (%d)\n", ret);
			clk_disable_unprepare(mdata->spi_clk);
			return ret;
		}
	}

	ret = spi_controller_resume(host);
	if (ret < 0) {
		clk_disable_unprepare(mdata->spi_clk);
		clk_disable_unprepare(mdata->spi_hclk);
	}

	return ret;
}
#endif /* CONFIG_PM_SLEEP */

#ifdef CONFIG_PM
static int mtk_spi_runtime_suspend(struct device *dev)
{
	struct spi_controller *host = dev_get_drvdata(dev);
	struct mtk_spi *mdata = spi_controller_get_devdata(host);

	if (mdata->dev_comp->no_need_unprepare) {
		clk_disable(mdata->spi_clk);
		clk_disable(mdata->spi_hclk);
	} else {
		clk_disable_unprepare(mdata->spi_clk);
		clk_disable_unprepare(mdata->spi_hclk);
	}

	return 0;
}

static int mtk_spi_runtime_resume(struct device *dev)
{
	struct spi_controller *host = dev_get_drvdata(dev);
	struct mtk_spi *mdata = spi_controller_get_devdata(host);
	int ret;

	if (mdata->dev_comp->no_need_unprepare) {
		ret = clk_enable(mdata->spi_clk);
		if (ret < 0) {
			dev_err(dev, "failed to enable spi_clk (%d)\n", ret);
			return ret;
		}
		ret = clk_enable(mdata->spi_hclk);
		if (ret < 0) {
			dev_err(dev, "failed to enable spi_hclk (%d)\n", ret);
			clk_disable(mdata->spi_clk);
			return ret;
		}
	} else {
		ret = clk_prepare_enable(mdata->spi_clk);
		if (ret < 0) {
			dev_err(dev, "failed to prepare_enable spi_clk (%d)\n", ret);
			return ret;
		}

		ret = clk_prepare_enable(mdata->spi_hclk);
		if (ret < 0) {
			dev_err(dev, "failed to prepare_enable spi_hclk (%d)\n", ret);
			clk_disable_unprepare(mdata->spi_clk);
			return ret;
		}
	}

	return 0;
}
#endif /* CONFIG_PM */

static const struct dev_pm_ops mtk_spi_pm = {
	SET_SYSTEM_SLEEP_PM_OPS(mtk_spi_suspend, mtk_spi_resume)
	SET_RUNTIME_PM_OPS(mtk_spi_runtime_suspend,
			   mtk_spi_runtime_resume, NULL)
};

static struct platform_driver mtk_spi_driver = {
	.driver = {
		.name = "mtk-spi",
		.pm	= &mtk_spi_pm,
		.of_match_table = mtk_spi_of_match,
	},
	.probe = mtk_spi_probe,
	.remove = mtk_spi_remove,
};

module_platform_driver(mtk_spi_driver);

MODULE_DESCRIPTION("MTK SPI Controller driver");
MODULE_AUTHOR("Leilk Liu <leilk.liu@mediatek.com>");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS("platform:mtk-spi");