1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3 * Copyright (c) 2015 MediaTek Inc.
4 * Author: Leilk Liu <leilk.liu@mediatek.com>
5 */
6
7 #include <linux/clk.h>
8 #include <linux/device.h>
9 #include <linux/err.h>
10 #include <linux/interrupt.h>
11 #include <linux/io.h>
12 #include <linux/ioport.h>
13 #include <linux/module.h>
14 #include <linux/of.h>
15 #include <linux/gpio/consumer.h>
16 #include <linux/pinctrl/consumer.h>
17 #include <linux/platform_device.h>
18 #include <linux/platform_data/spi-mt65xx.h>
19 #include <linux/pm_runtime.h>
20 #include <linux/spi/spi.h>
21 #include <linux/spi/spi-mem.h>
22 #include <linux/dma-mapping.h>
23 #include <linux/pm_qos.h>
24
/* SPI controller register offsets */
#define SPI_CFG0_REG			0x0000
#define SPI_CFG1_REG			0x0004
#define SPI_TX_SRC_REG			0x0008
#define SPI_RX_DST_REG			0x000c
#define SPI_TX_DATA_REG			0x0010
#define SPI_RX_DATA_REG			0x0014
#define SPI_CMD_REG			0x0018
#define SPI_STATUS0_REG			0x001c
#define SPI_PAD_SEL_REG			0x0024
#define SPI_CFG2_REG			0x0028
#define SPI_TX_SRC_REG_64		0x002c
#define SPI_RX_DST_REG_64		0x0030
#define SPI_CFG3_IPM_REG		0x0040

/* SPI_CFG0_REG fields (legacy layout; enhance_timing parts use SPI_CFG2) */
#define SPI_CFG0_SCK_HIGH_OFFSET	0
#define SPI_CFG0_SCK_LOW_OFFSET		8
#define SPI_CFG0_CS_HOLD_OFFSET		16
#define SPI_CFG0_CS_SETUP_OFFSET	24
/* enhance_timing parts: 16-bit CS hold/setup fields in SPI_CFG0 */
#define SPI_ADJUST_CFG0_CS_HOLD_OFFSET	0
#define SPI_ADJUST_CFG0_CS_SETUP_OFFSET	16

/* SPI_CFG1_REG fields */
#define SPI_CFG1_CS_IDLE_OFFSET		0
#define SPI_CFG1_PACKET_LOOP_OFFSET	8
#define SPI_CFG1_PACKET_LENGTH_OFFSET	16
#define SPI_CFG1_GET_TICK_DLY_OFFSET	29
#define SPI_CFG1_GET_TICK_DLY_OFFSET_V1	30

/* GET_TICK delay: 3-bit field on enhance_timing parts, 2-bit on legacy */
#define SPI_CFG1_GET_TICK_DLY_MASK	0xe0000000
#define SPI_CFG1_GET_TICK_DLY_MASK_V1	0xc0000000

#define SPI_CFG1_CS_IDLE_MASK		0xff
#define SPI_CFG1_PACKET_LOOP_MASK	0xff00
#define SPI_CFG1_PACKET_LENGTH_MASK	0x3ff0000
/* IPM parts widen the packet-length field to 16 bits */
#define SPI_CFG1_IPM_PACKET_LENGTH_MASK	GENMASK(31, 16)
#define SPI_CFG2_SCK_HIGH_OFFSET	0
#define SPI_CFG2_SCK_LOW_OFFSET		16

/* SPI_CMD_REG bits */
#define SPI_CMD_ACT			BIT(0)
#define SPI_CMD_RESUME			BIT(1)
#define SPI_CMD_RST			BIT(2)
#define SPI_CMD_PAUSE_EN		BIT(4)
#define SPI_CMD_DEASSERT		BIT(5)
#define SPI_CMD_SAMPLE_SEL		BIT(6)
#define SPI_CMD_CS_POL			BIT(7)
#define SPI_CMD_CPHA			BIT(8)
#define SPI_CMD_CPOL			BIT(9)
#define SPI_CMD_RX_DMA			BIT(10)
#define SPI_CMD_TX_DMA			BIT(11)
#define SPI_CMD_TXMSBF			BIT(12)
#define SPI_CMD_RXMSBF			BIT(13)
#define SPI_CMD_RX_ENDIAN		BIT(14)
#define SPI_CMD_TX_ENDIAN		BIT(15)
#define SPI_CMD_FINISH_IE		BIT(16)
#define SPI_CMD_PAUSE_IE		BIT(17)
#define SPI_CMD_IPM_NONIDLE_MODE	BIT(19)
#define SPI_CMD_IPM_SPIM_LOOP		BIT(21)
#define SPI_CMD_IPM_GET_TICKDLY_OFFSET	22

#define SPI_CMD_IPM_GET_TICKDLY_MASK	GENMASK(24, 22)

/* map a bus width (1/2/4) to the 2-bit pin-mode encoding (0/1/2) */
#define PIN_MODE_CFG(x)	((x) / 2)

/* SPI_CFG3_IPM_REG fields (SPI-MEM / half-duplex control, IPM parts only) */
#define SPI_CFG3_IPM_HALF_DUPLEX_DIR	BIT(2)
#define SPI_CFG3_IPM_HALF_DUPLEX_EN	BIT(3)
#define SPI_CFG3_IPM_XMODE_EN		BIT(4)
#define SPI_CFG3_IPM_NODATA_FLAG	BIT(5)
#define SPI_CFG3_IPM_CMD_BYTELEN_OFFSET	8
#define SPI_CFG3_IPM_ADDR_BYTELEN_OFFSET 12

#define SPI_CFG3_IPM_CMD_PIN_MODE_MASK	GENMASK(1, 0)
#define SPI_CFG3_IPM_CMD_BYTELEN_MASK	GENMASK(11, 8)
#define SPI_CFG3_IPM_ADDR_BYTELEN_MASK	GENMASK(15, 12)

#define MT8173_SPI_MAX_PAD_SEL		3

/* SPI_STATUS0_REG: transfer stopped in pause mode (CS still asserted) */
#define MTK_SPI_PAUSE_INT_STATUS	0x2

#define MTK_SPI_MAX_FIFO_SIZE		32U
#define MTK_SPI_PACKET_SIZE		1024
#define MTK_SPI_IPM_PACKET_SIZE		SZ_64K
#define MTK_SPI_IPM_PACKET_LOOP		SZ_256

/* driver-side controller state */
#define MTK_SPI_IDLE			0
#define MTK_SPI_PAUSED			1

#define MTK_SPI_32BITS_MASK		(0xffffffff)

#define DMA_ADDR_EXT_BITS		(36)
#define DMA_ADDR_DEF_BITS		(32)
114
/**
 * struct mtk_spi_compatible - device data structure
 * @need_pad_sel:	Enable pad (pins) selection in SPI controller
 * @must_tx:		Must explicitly send dummy TX bytes to do RX only transfer
 * @enhance_timing:	Enable adjusting cfg register to enhance time accuracy
 * @dma_ext:		DMA address extension supported (36-bit addresses)
 * @no_need_unprepare:	Don't unprepare the SPI clk during runtime
 * @ipm_design:	Adjust/extend registers to support IPM design IP features
 */
struct mtk_spi_compatible {
	bool need_pad_sel;
	bool must_tx;
	bool enhance_timing;
	bool dma_ext;
	bool no_need_unprepare;
	bool ipm_design;
};
132
/**
 * struct mtk_spi - SPI driver instance
 * @base:		Start address of the SPI controller registers
 * @state:		SPI controller state (MTK_SPI_IDLE or MTK_SPI_PAUSED)
 * @pad_num:		Number of pad_sel entries
 * @pad_sel:		Groups of pins to select, indexed by chip select
 * @parent_clk:		Parent of sel_clk
 * @sel_clk:		SPI host mux clock
 * @spi_clk:		Peripheral clock
 * @spi_hclk:		AHB bus clock
 * @cur_transfer:	Currently processed SPI transfer
 * @xfer_len:		Number of bytes to transfer in the current chunk
 * @num_xfered:		Number of transferred bytes
 * @tx_sgl:		TX transfer scatterlist
 * @rx_sgl:		RX transfer scatterlist
 * @tx_sgl_len:		Size of TX DMA transfer
 * @rx_sgl_len:		Size of RX DMA transfer
 * @dev_comp:		Device data structure
 * @qos_request:	QoS request
 * @spi_clk_hz:		Current SPI clock in Hz
 * @spimem_done:	SPI-MEM operation completion
 * @use_spimem:		Enables SPI-MEM
 * @dev:		Device pointer
 * @tx_dma:		DMA start for SPI-MEM TX
 * @rx_dma:		DMA start for SPI-MEM RX
 */
struct mtk_spi {
	void __iomem *base;
	u32 state;
	int pad_num;
	u32 *pad_sel;
	struct clk *parent_clk, *sel_clk, *spi_clk, *spi_hclk;
	struct spi_transfer *cur_transfer;
	u32 xfer_len;
	u32 num_xfered;
	struct scatterlist *tx_sgl, *rx_sgl;
	u32 tx_sgl_len, rx_sgl_len;
	const struct mtk_spi_compatible *dev_comp;
	struct pm_qos_request qos_request;
	u32 spi_clk_hz;
	struct completion spimem_done;
	bool use_spimem;
	struct device *dev;
	dma_addr_t tx_dma;
	dma_addr_t rx_dma;
};
179
/* Baseline feature set: no pad select, no forced TX, legacy timing. */
static const struct mtk_spi_compatible mtk_common_compat;

/* MT2712: RX-only transfers require dummy TX bytes. */
static const struct mtk_spi_compatible mt2712_compat = {
	.must_tx = true,
};

/* Generic IPM IP: enhanced timing, extended DMA, IPM register layout. */
static const struct mtk_spi_compatible mtk_ipm_compat = {
	.enhance_timing = true,
	.dma_ext = true,
	.ipm_design = true,
};

static const struct mtk_spi_compatible mt6765_compat = {
	.need_pad_sel = true,
	.must_tx = true,
	.enhance_timing = true,
	.dma_ext = true,
};

static const struct mtk_spi_compatible mt7622_compat = {
	.must_tx = true,
	.enhance_timing = true,
};

static const struct mtk_spi_compatible mt8173_compat = {
	.need_pad_sel = true,
	.must_tx = true,
};

static const struct mtk_spi_compatible mt8183_compat = {
	.need_pad_sel = true,
	.must_tx = true,
	.enhance_timing = true,
};

/* MT6893 additionally keeps the SPI clock prepared across runtime PM. */
static const struct mtk_spi_compatible mt6893_compat = {
	.need_pad_sel = true,
	.must_tx = true,
	.enhance_timing = true,
	.dma_ext = true,
	.no_need_unprepare = true,
};

static const struct mtk_spi_compatible mt6991_compat = {
	.need_pad_sel = true,
	.must_tx = true,
	.enhance_timing = true,
	.dma_ext = true,
	.ipm_design = true,
};

/*
 * A piece of default chip info unless the platform
 * supplies it.
 */
static const struct mtk_chip_config mtk_default_chip_info = {
	.sample_sel = 0,
	.tick_delay = 0,
};
239
/* DT match table; several SoCs share a feature set (e.g. mt7629 -> mt7622). */
static const struct of_device_id mtk_spi_of_match[] = {
	{ .compatible = "mediatek,spi-ipm",
		.data = (void *)&mtk_ipm_compat,
	},
	{ .compatible = "mediatek,mt2701-spi",
		.data = (void *)&mtk_common_compat,
	},
	{ .compatible = "mediatek,mt2712-spi",
		.data = (void *)&mt2712_compat,
	},
	{ .compatible = "mediatek,mt6589-spi",
		.data = (void *)&mtk_common_compat,
	},
	{ .compatible = "mediatek,mt6765-spi",
		.data = (void *)&mt6765_compat,
	},
	{ .compatible = "mediatek,mt6991-spi",
		.data = (void *)&mt6991_compat,
	},
	{ .compatible = "mediatek,mt7622-spi",
		.data = (void *)&mt7622_compat,
	},
	{ .compatible = "mediatek,mt7629-spi",
		.data = (void *)&mt7622_compat,
	},
	{ .compatible = "mediatek,mt8135-spi",
		.data = (void *)&mtk_common_compat,
	},
	{ .compatible = "mediatek,mt8173-spi",
		.data = (void *)&mt8173_compat,
	},
	{ .compatible = "mediatek,mt8183-spi",
		.data = (void *)&mt8183_compat,
	},
	{ .compatible = "mediatek,mt8192-spi",
		.data = (void *)&mt6765_compat,
	},
	{ .compatible = "mediatek,mt6893-spi",
		.data = (void *)&mt6893_compat,
	},
	{}
};
MODULE_DEVICE_TABLE(of, mtk_spi_of_match);
283
mtk_spi_reset(struct mtk_spi * mdata)284 static void mtk_spi_reset(struct mtk_spi *mdata)
285 {
286 u32 reg_val;
287
288 /* set the software reset bit in SPI_CMD_REG. */
289 reg_val = readl(mdata->base + SPI_CMD_REG);
290 reg_val |= SPI_CMD_RST;
291 writel(reg_val, mdata->base + SPI_CMD_REG);
292
293 reg_val = readl(mdata->base + SPI_CMD_REG);
294 reg_val &= ~SPI_CMD_RST;
295 writel(reg_val, mdata->base + SPI_CMD_REG);
296 }
297
mtk_spi_set_hw_cs_timing(struct spi_device * spi)298 static int mtk_spi_set_hw_cs_timing(struct spi_device *spi)
299 {
300 struct mtk_spi *mdata = spi_controller_get_devdata(spi->controller);
301 struct spi_delay *cs_setup = &spi->cs_setup;
302 struct spi_delay *cs_hold = &spi->cs_hold;
303 struct spi_delay *cs_inactive = &spi->cs_inactive;
304 u32 setup, hold, inactive;
305 u32 reg_val;
306 int delay;
307
308 delay = spi_delay_to_ns(cs_setup, NULL);
309 if (delay < 0)
310 return delay;
311 setup = (delay * DIV_ROUND_UP(mdata->spi_clk_hz, 1000000)) / 1000;
312
313 delay = spi_delay_to_ns(cs_hold, NULL);
314 if (delay < 0)
315 return delay;
316 hold = (delay * DIV_ROUND_UP(mdata->spi_clk_hz, 1000000)) / 1000;
317
318 delay = spi_delay_to_ns(cs_inactive, NULL);
319 if (delay < 0)
320 return delay;
321 inactive = (delay * DIV_ROUND_UP(mdata->spi_clk_hz, 1000000)) / 1000;
322
323 if (hold || setup) {
324 reg_val = readl(mdata->base + SPI_CFG0_REG);
325 if (mdata->dev_comp->enhance_timing) {
326 if (hold) {
327 hold = min_t(u32, hold, 0x10000);
328 reg_val &= ~(0xffff << SPI_ADJUST_CFG0_CS_HOLD_OFFSET);
329 reg_val |= (((hold - 1) & 0xffff)
330 << SPI_ADJUST_CFG0_CS_HOLD_OFFSET);
331 }
332 if (setup) {
333 setup = min_t(u32, setup, 0x10000);
334 reg_val &= ~(0xffff << SPI_ADJUST_CFG0_CS_SETUP_OFFSET);
335 reg_val |= (((setup - 1) & 0xffff)
336 << SPI_ADJUST_CFG0_CS_SETUP_OFFSET);
337 }
338 } else {
339 if (hold) {
340 hold = min_t(u32, hold, 0x100);
341 reg_val &= ~(0xff << SPI_CFG0_CS_HOLD_OFFSET);
342 reg_val |= (((hold - 1) & 0xff) << SPI_CFG0_CS_HOLD_OFFSET);
343 }
344 if (setup) {
345 setup = min_t(u32, setup, 0x100);
346 reg_val &= ~(0xff << SPI_CFG0_CS_SETUP_OFFSET);
347 reg_val |= (((setup - 1) & 0xff)
348 << SPI_CFG0_CS_SETUP_OFFSET);
349 }
350 }
351 writel(reg_val, mdata->base + SPI_CFG0_REG);
352 }
353
354 if (inactive) {
355 inactive = min_t(u32, inactive, 0x100);
356 reg_val = readl(mdata->base + SPI_CFG1_REG);
357 reg_val &= ~SPI_CFG1_CS_IDLE_MASK;
358 reg_val |= (((inactive - 1) & 0xff) << SPI_CFG1_CS_IDLE_OFFSET);
359 writel(reg_val, mdata->base + SPI_CFG1_REG);
360 }
361
362 return 0;
363 }
364
/*
 * mtk_spi_hw_init - program the controller for one SPI device.
 * @host: SPI controller
 * @spi: device whose mode and chip config are applied
 *
 * Called per message (via prepare_message) and per SPI-MEM op.  Raises a
 * CPU latency QoS request (dropped again in mtk_spi_unprepare_message),
 * then programs clock polarity/phase, bit order, endianness, CS polarity,
 * sample edge, pad selection and the GET_TICK delay, and finally the CS
 * timing.  Always returns 0.
 */
static int mtk_spi_hw_init(struct spi_controller *host,
			   struct spi_device *spi)
{
	u16 cpha, cpol;
	u32 reg_val;
	struct mtk_chip_config *chip_config = spi->controller_data;
	struct mtk_spi *mdata = spi_controller_get_devdata(host);

	/* keep CPU wakeup latency low while a message is in flight */
	cpu_latency_qos_update_request(&mdata->qos_request, 500);
	cpha = spi->mode & SPI_CPHA ? 1 : 0;
	cpol = spi->mode & SPI_CPOL ? 1 : 0;

	reg_val = readl(mdata->base + SPI_CMD_REG);
	if (mdata->dev_comp->ipm_design) {
		/* SPI transfer without idle time until packet length done */
		reg_val |= SPI_CMD_IPM_NONIDLE_MODE;
		if (spi->mode & SPI_LOOP)
			reg_val |= SPI_CMD_IPM_SPIM_LOOP;
		else
			reg_val &= ~SPI_CMD_IPM_SPIM_LOOP;
	}

	if (cpha)
		reg_val |= SPI_CMD_CPHA;
	else
		reg_val &= ~SPI_CMD_CPHA;
	if (cpol)
		reg_val |= SPI_CMD_CPOL;
	else
		reg_val &= ~SPI_CMD_CPOL;

	/* set the mlsbx and mlsbtx (TX/RX bit order) */
	if (spi->mode & SPI_LSB_FIRST) {
		reg_val &= ~SPI_CMD_TXMSBF;
		reg_val &= ~SPI_CMD_RXMSBF;
	} else {
		reg_val |= SPI_CMD_TXMSBF;
		reg_val |= SPI_CMD_RXMSBF;
	}

	/* set the tx/rx endian to match the CPU byte order */
#ifdef __LITTLE_ENDIAN
	reg_val &= ~SPI_CMD_TX_ENDIAN;
	reg_val &= ~SPI_CMD_RX_ENDIAN;
#else
	reg_val |= SPI_CMD_TX_ENDIAN;
	reg_val |= SPI_CMD_RX_ENDIAN;
#endif

	if (mdata->dev_comp->enhance_timing) {
		/* set CS polarity */
		if (spi->mode & SPI_CS_HIGH)
			reg_val |= SPI_CMD_CS_POL;
		else
			reg_val &= ~SPI_CMD_CS_POL;

		if (chip_config->sample_sel)
			reg_val |= SPI_CMD_SAMPLE_SEL;
		else
			reg_val &= ~SPI_CMD_SAMPLE_SEL;
	}

	/* set finish and pause interrupt always enable */
	reg_val |= SPI_CMD_FINISH_IE | SPI_CMD_PAUSE_IE;

	/* disable dma mode; enabled per transfer in mtk_spi_dma_transfer() */
	reg_val &= ~(SPI_CMD_TX_DMA | SPI_CMD_RX_DMA);

	/* disable deassert mode */
	reg_val &= ~SPI_CMD_DEASSERT;

	writel(reg_val, mdata->base + SPI_CMD_REG);

	/* pad select: route the controller to the pins for this chipselect */
	if (mdata->dev_comp->need_pad_sel)
		writel(mdata->pad_sel[spi_get_chipselect(spi, 0)],
		       mdata->base + SPI_PAD_SEL_REG);

	/* tick delay: field location and width depend on the IP generation */
	if (mdata->dev_comp->enhance_timing) {
		if (mdata->dev_comp->ipm_design) {
			/* 3-bit field in SPI_CMD_REG on IPM parts */
			reg_val = readl(mdata->base + SPI_CMD_REG);
			reg_val &= ~SPI_CMD_IPM_GET_TICKDLY_MASK;
			reg_val |= ((chip_config->tick_delay & 0x7)
				    << SPI_CMD_IPM_GET_TICKDLY_OFFSET);
			writel(reg_val, mdata->base + SPI_CMD_REG);
		} else {
			/* 3-bit field at the top of SPI_CFG1_REG */
			reg_val = readl(mdata->base + SPI_CFG1_REG);
			reg_val &= ~SPI_CFG1_GET_TICK_DLY_MASK;
			reg_val |= ((chip_config->tick_delay & 0x7)
				    << SPI_CFG1_GET_TICK_DLY_OFFSET);
			writel(reg_val, mdata->base + SPI_CFG1_REG);
		}
	} else {
		/* legacy parts only implement a 2-bit field */
		reg_val = readl(mdata->base + SPI_CFG1_REG);
		reg_val &= ~SPI_CFG1_GET_TICK_DLY_MASK_V1;
		reg_val |= ((chip_config->tick_delay & 0x3)
			    << SPI_CFG1_GET_TICK_DLY_OFFSET_V1);
		writel(reg_val, mdata->base + SPI_CFG1_REG);
	}

	/* set hw cs timing */
	mtk_spi_set_hw_cs_timing(spi);
	return 0;
}
470
mtk_spi_prepare_message(struct spi_controller * host,struct spi_message * msg)471 static int mtk_spi_prepare_message(struct spi_controller *host,
472 struct spi_message *msg)
473 {
474 return mtk_spi_hw_init(host, msg->spi);
475 }
476
mtk_spi_unprepare_message(struct spi_controller * host,struct spi_message * message)477 static int mtk_spi_unprepare_message(struct spi_controller *host,
478 struct spi_message *message)
479 {
480 struct mtk_spi *mdata = spi_controller_get_devdata(host);
481
482 cpu_latency_qos_update_request(&mdata->qos_request, PM_QOS_DEFAULT_VALUE);
483 return 0;
484 }
485
/*
 * Control the native chip select via pause mode: with SPI_CMD_PAUSE_EN set
 * the controller keeps CS asserted between transfers.
 */
static void mtk_spi_set_cs(struct spi_device *spi, bool enable)
{
	struct mtk_spi *mdata = spi_controller_get_devdata(spi->controller);
	u32 cmd;

	if (spi->mode & SPI_CS_HIGH)
		enable = !enable;

	cmd = readl(mdata->base + SPI_CMD_REG);
	if (enable) {
		/* leave pause mode and bring the controller back to idle */
		cmd &= ~SPI_CMD_PAUSE_EN;
		writel(cmd, mdata->base + SPI_CMD_REG);
		mdata->state = MTK_SPI_IDLE;
		mtk_spi_reset(mdata);
	} else {
		cmd |= SPI_CMD_PAUSE_EN;
		writel(cmd, mdata->base + SPI_CMD_REG);
	}
}
505
/*
 * Program the SCK high/low times for the requested transfer speed.
 * Enhanced-timing parts use 16-bit fields in SPI_CFG2; legacy parts use
 * 8-bit fields in SPI_CFG0.  The registers store "ticks - 1".
 */
static void mtk_spi_prepare_transfer(struct spi_controller *host,
				     u32 speed_hz)
{
	struct mtk_spi *mdata = spi_controller_get_devdata(host);
	u32 div, sck_time, reg_val;

	/* no divider needed at half the source clock or above */
	div = (speed_hz < mdata->spi_clk_hz / 2) ?
	      DIV_ROUND_UP(mdata->spi_clk_hz, speed_hz) : 1;

	sck_time = (div + 1) / 2;

	if (mdata->dev_comp->enhance_timing) {
		reg_val = readl(mdata->base + SPI_CFG2_REG);
		reg_val &= ~(0xffff << SPI_CFG2_SCK_HIGH_OFFSET);
		reg_val &= ~(0xffff << SPI_CFG2_SCK_LOW_OFFSET);
		reg_val |= ((sck_time - 1) & 0xffff) << SPI_CFG2_SCK_HIGH_OFFSET;
		reg_val |= ((sck_time - 1) & 0xffff) << SPI_CFG2_SCK_LOW_OFFSET;
		writel(reg_val, mdata->base + SPI_CFG2_REG);
	} else {
		reg_val = readl(mdata->base + SPI_CFG0_REG);
		reg_val &= ~(0xff << SPI_CFG0_SCK_HIGH_OFFSET);
		reg_val &= ~(0xff << SPI_CFG0_SCK_LOW_OFFSET);
		reg_val |= ((sck_time - 1) & 0xff) << SPI_CFG0_SCK_HIGH_OFFSET;
		reg_val |= ((sck_time - 1) & 0xff) << SPI_CFG0_SCK_LOW_OFFSET;
		writel(reg_val, mdata->base + SPI_CFG0_REG);
	}
}
538
mtk_spi_setup_packet(struct spi_controller * host)539 static void mtk_spi_setup_packet(struct spi_controller *host)
540 {
541 u32 packet_size, packet_loop, reg_val;
542 struct mtk_spi *mdata = spi_controller_get_devdata(host);
543
544 if (mdata->dev_comp->ipm_design)
545 packet_size = min_t(u32,
546 mdata->xfer_len,
547 MTK_SPI_IPM_PACKET_SIZE);
548 else
549 packet_size = min_t(u32,
550 mdata->xfer_len,
551 MTK_SPI_PACKET_SIZE);
552
553 packet_loop = mdata->xfer_len / packet_size;
554
555 reg_val = readl(mdata->base + SPI_CFG1_REG);
556 if (mdata->dev_comp->ipm_design)
557 reg_val &= ~SPI_CFG1_IPM_PACKET_LENGTH_MASK;
558 else
559 reg_val &= ~SPI_CFG1_PACKET_LENGTH_MASK;
560 reg_val |= (packet_size - 1) << SPI_CFG1_PACKET_LENGTH_OFFSET;
561 reg_val &= ~SPI_CFG1_PACKET_LOOP_MASK;
562 reg_val |= (packet_loop - 1) << SPI_CFG1_PACKET_LOOP_OFFSET;
563 writel(reg_val, mdata->base + SPI_CFG1_REG);
564 }
565
/*
 * mtk_spi_set_nbit - translate an SPI_NBITS_* transfer width into the
 * 2-bit pin-mode encoding used by SPI_CFG3_IPM_REG.
 * @nbit: SPI_NBITS_SINGLE, SPI_NBITS_DUAL or SPI_NBITS_QUAD
 *
 * Unknown widths fall back to single mode with a one-time warning.
 * Marked static: this helper is file-local; a plain "inline" definition
 * in a .c file has fragile C99 linkage semantics.
 */
static inline u32 mtk_spi_set_nbit(u32 nbit)
{
	switch (nbit) {
	case SPI_NBITS_SINGLE:
		return 0x0;
	case SPI_NBITS_DUAL:
		return 0x1;
	case SPI_NBITS_QUAD:
		return 0x2;
	default:
		pr_warn_once("unknown nbit mode %u. Falling back to single mode\n",
			     nbit);
		return 0x0;
	}
}
581
mtk_spi_enable_transfer(struct spi_controller * host)582 static void mtk_spi_enable_transfer(struct spi_controller *host)
583 {
584 u32 cmd;
585 struct mtk_spi *mdata = spi_controller_get_devdata(host);
586
587 cmd = readl(mdata->base + SPI_CMD_REG);
588 if (mdata->state == MTK_SPI_IDLE)
589 cmd |= SPI_CMD_ACT;
590 else
591 cmd |= SPI_CMD_RESUME;
592 writel(cmd, mdata->base + SPI_CMD_REG);
593 }
594
/*
 * Return the tail of @xfer_len that is not a whole multiple of the
 * hardware packet size (0 if the length fits in a single packet).
 */
static int mtk_spi_get_mult_delta(struct mtk_spi *mdata, u32 xfer_len)
{
	u32 packet_size = mdata->dev_comp->ipm_design ?
			  MTK_SPI_IPM_PACKET_SIZE : MTK_SPI_PACKET_SIZE;

	return (xfer_len > packet_size) ? xfer_len % packet_size : 0;
}
609
/*
 * mtk_spi_update_mdata_len - carve the next DMA chunk out of the remaining
 * TX/RX scatterlist lengths.
 *
 * Once a transfer exceeds one packet the hardware can only express whole
 * multiples of the packet size, so the non-multiple tail (mult_delta) is
 * held back for a follow-up chunk.  Sets mdata->xfer_len to the chunk
 * handled now and decrements the per-direction remaining lengths.
 */
static void mtk_spi_update_mdata_len(struct spi_controller *host)
{
	int mult_delta;
	struct mtk_spi *mdata = spi_controller_get_devdata(host);

	if (mdata->tx_sgl_len && mdata->rx_sgl_len) {
		/* bidirectional: the chunk is bounded by the shorter side */
		if (mdata->tx_sgl_len > mdata->rx_sgl_len) {
			mult_delta = mtk_spi_get_mult_delta(mdata, mdata->rx_sgl_len);
			mdata->xfer_len = mdata->rx_sgl_len - mult_delta;
			mdata->rx_sgl_len = mult_delta;
			mdata->tx_sgl_len -= mdata->xfer_len;
		} else {
			mult_delta = mtk_spi_get_mult_delta(mdata, mdata->tx_sgl_len);
			mdata->xfer_len = mdata->tx_sgl_len - mult_delta;
			mdata->tx_sgl_len = mult_delta;
			mdata->rx_sgl_len -= mdata->xfer_len;
		}
	} else if (mdata->tx_sgl_len) {
		mult_delta = mtk_spi_get_mult_delta(mdata, mdata->tx_sgl_len);
		mdata->xfer_len = mdata->tx_sgl_len - mult_delta;
		mdata->tx_sgl_len = mult_delta;
	} else if (mdata->rx_sgl_len) {
		mult_delta = mtk_spi_get_mult_delta(mdata, mdata->rx_sgl_len);
		mdata->xfer_len = mdata->rx_sgl_len - mult_delta;
		mdata->rx_sgl_len = mult_delta;
	}
}
637
/*
 * mtk_spi_setup_dma_addr - program the DMA source/destination registers
 * for the current chunk.  Controllers with extended addressing (dma_ext)
 * take the upper address bits in the *_REG_64 registers.
 */
static void mtk_spi_setup_dma_addr(struct spi_controller *host,
				   struct spi_transfer *xfer)
{
	struct mtk_spi *mdata = spi_controller_get_devdata(host);

	if (mdata->tx_sgl) {
		writel((u32)(xfer->tx_dma & MTK_SPI_32BITS_MASK),
		       mdata->base + SPI_TX_SRC_REG);
#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
		if (mdata->dev_comp->dma_ext)
			writel((u32)(xfer->tx_dma >> 32),
			       mdata->base + SPI_TX_SRC_REG_64);
#endif
	}

	if (mdata->rx_sgl) {
		writel((u32)(xfer->rx_dma & MTK_SPI_32BITS_MASK),
		       mdata->base + SPI_RX_DST_REG);
#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
		if (mdata->dev_comp->dma_ext)
			writel((u32)(xfer->rx_dma >> 32),
			       mdata->base + SPI_RX_DST_REG_64);
#endif
	}
}
663
mtk_spi_fifo_transfer(struct spi_controller * host,struct spi_device * spi,struct spi_transfer * xfer)664 static int mtk_spi_fifo_transfer(struct spi_controller *host,
665 struct spi_device *spi,
666 struct spi_transfer *xfer)
667 {
668 int cnt, remainder;
669 u32 reg_val;
670 struct mtk_spi *mdata = spi_controller_get_devdata(host);
671
672 mdata->cur_transfer = xfer;
673 mdata->xfer_len = min(MTK_SPI_MAX_FIFO_SIZE, xfer->len);
674 mdata->num_xfered = 0;
675 mtk_spi_prepare_transfer(host, xfer->speed_hz);
676 mtk_spi_setup_packet(host);
677
678 if (xfer->tx_buf) {
679 cnt = xfer->len / 4;
680 iowrite32_rep(mdata->base + SPI_TX_DATA_REG, xfer->tx_buf, cnt);
681 remainder = xfer->len % 4;
682 if (remainder > 0) {
683 reg_val = 0;
684 memcpy(®_val, xfer->tx_buf + (cnt * 4), remainder);
685 writel(reg_val, mdata->base + SPI_TX_DATA_REG);
686 }
687 }
688
689 mtk_spi_enable_transfer(host);
690
691 return 1;
692 }
693
/*
 * mtk_spi_dma_transfer - start a scatter-gather DMA transfer.
 *
 * Enables DMA mode in SPI_CMD_REG for the directions in use, latches the
 * first scatterlist entry of each direction, sizes the first chunk,
 * programs the DMA addresses and kicks the hardware.  Returns 1 so the
 * SPI core waits for spi_finalize_current_transfer() from the interrupt
 * thread.
 */
static int mtk_spi_dma_transfer(struct spi_controller *host,
				struct spi_device *spi,
				struct spi_transfer *xfer)
{
	int cmd;
	struct mtk_spi *mdata = spi_controller_get_devdata(host);

	mdata->tx_sgl = NULL;
	mdata->rx_sgl = NULL;
	mdata->tx_sgl_len = 0;
	mdata->rx_sgl_len = 0;
	mdata->cur_transfer = xfer;
	mdata->num_xfered = 0;

	mtk_spi_prepare_transfer(host, xfer->speed_hz);

	/* enable DMA mode only for the directions this transfer uses */
	cmd = readl(mdata->base + SPI_CMD_REG);
	if (xfer->tx_buf)
		cmd |= SPI_CMD_TX_DMA;
	if (xfer->rx_buf)
		cmd |= SPI_CMD_RX_DMA;
	writel(cmd, mdata->base + SPI_CMD_REG);

	if (xfer->tx_buf)
		mdata->tx_sgl = xfer->tx_sg.sgl;
	if (xfer->rx_buf)
		mdata->rx_sgl = xfer->rx_sg.sgl;

	/* latch the first scatterlist entry per direction */
	if (mdata->tx_sgl) {
		xfer->tx_dma = sg_dma_address(mdata->tx_sgl);
		mdata->tx_sgl_len = sg_dma_len(mdata->tx_sgl);
	}
	if (mdata->rx_sgl) {
		xfer->rx_dma = sg_dma_address(mdata->rx_sgl);
		mdata->rx_sgl_len = sg_dma_len(mdata->rx_sgl);
	}

	mtk_spi_update_mdata_len(host);
	mtk_spi_setup_packet(host);
	mtk_spi_setup_dma_addr(host, xfer);
	mtk_spi_enable_transfer(host);

	return 1;
}
738
mtk_spi_transfer_one(struct spi_controller * host,struct spi_device * spi,struct spi_transfer * xfer)739 static int mtk_spi_transfer_one(struct spi_controller *host,
740 struct spi_device *spi,
741 struct spi_transfer *xfer)
742 {
743 struct mtk_spi *mdata = spi_controller_get_devdata(spi->controller);
744 u32 reg_val = 0;
745
746 /* prepare xfer direction and duplex mode */
747 if (mdata->dev_comp->ipm_design) {
748 if (xfer->tx_buf && xfer->rx_buf) {
749 reg_val &= ~SPI_CFG3_IPM_HALF_DUPLEX_EN;
750 } else if (xfer->tx_buf) {
751 reg_val |= SPI_CFG3_IPM_HALF_DUPLEX_EN;
752 reg_val &= ~SPI_CFG3_IPM_HALF_DUPLEX_DIR;
753 reg_val |= mtk_spi_set_nbit(xfer->tx_nbits);
754 } else {
755 reg_val |= SPI_CFG3_IPM_HALF_DUPLEX_EN;
756 reg_val |= SPI_CFG3_IPM_HALF_DUPLEX_DIR;
757 reg_val |= mtk_spi_set_nbit(xfer->rx_nbits);
758 }
759 writel(reg_val, mdata->base + SPI_CFG3_IPM_REG);
760 }
761
762 if (host->can_dma(host, spi, xfer))
763 return mtk_spi_dma_transfer(host, spi, xfer);
764 else
765 return mtk_spi_fifo_transfer(host, spi, xfer);
766 }
767
mtk_spi_can_dma(struct spi_controller * host,struct spi_device * spi,struct spi_transfer * xfer)768 static bool mtk_spi_can_dma(struct spi_controller *host,
769 struct spi_device *spi,
770 struct spi_transfer *xfer)
771 {
772 /* Buffers for DMA transactions must be 4-byte aligned */
773 return (xfer->len > MTK_SPI_MAX_FIFO_SIZE &&
774 (unsigned long)xfer->tx_buf % 4 == 0 &&
775 (unsigned long)xfer->rx_buf % 4 == 0);
776 }
777
mtk_spi_setup(struct spi_device * spi)778 static int mtk_spi_setup(struct spi_device *spi)
779 {
780 struct mtk_spi *mdata = spi_controller_get_devdata(spi->controller);
781
782 if (!spi->controller_data)
783 spi->controller_data = (void *)&mtk_default_chip_info;
784
785 if (mdata->dev_comp->need_pad_sel && spi_get_csgpiod(spi, 0))
786 /* CS de-asserted, gpiolib will handle inversion */
787 gpiod_direction_output(spi_get_csgpiod(spi, 0), 0);
788
789 return 0;
790 }
791
/*
 * mtk_spi_interrupt_thread - threaded IRQ bottom half.
 *
 * PIO transfers: drain the RX FIFO for the chunk that just completed,
 * then either finalize the transfer or refill the TX FIFO with the next
 * chunk (at most MTK_SPI_MAX_FIFO_SIZE bytes) and restart.
 *
 * DMA transfers: advance the scatterlists, reprogram the DMA addresses
 * for the next chunk, or disable DMA mode and finalize once both lists
 * are exhausted.
 */
static irqreturn_t mtk_spi_interrupt_thread(int irq, void *dev_id)
{
	u32 cmd, reg_val, cnt, remainder, len;
	struct spi_controller *host = dev_id;
	struct mtk_spi *mdata = spi_controller_get_devdata(host);
	struct spi_transfer *xfer = mdata->cur_transfer;

	if (!host->can_dma(host, NULL, xfer)) {
		/* PIO path: read back what the FIFO received */
		if (xfer->rx_buf) {
			cnt = mdata->xfer_len / 4;
			ioread32_rep(mdata->base + SPI_RX_DATA_REG,
				     xfer->rx_buf + mdata->num_xfered, cnt);
			remainder = mdata->xfer_len % 4;
			if (remainder > 0) {
				/* tail bytes share one last 32-bit word */
				reg_val = readl(mdata->base + SPI_RX_DATA_REG);
				memcpy(xfer->rx_buf + (cnt * 4) + mdata->num_xfered,
				       &reg_val,
				       remainder);
			}
		}

		mdata->num_xfered += mdata->xfer_len;
		if (mdata->num_xfered == xfer->len) {
			spi_finalize_current_transfer(host);
			return IRQ_HANDLED;
		}

		/* queue the next FIFO-sized chunk */
		len = xfer->len - mdata->num_xfered;
		mdata->xfer_len = min(MTK_SPI_MAX_FIFO_SIZE, len);
		mtk_spi_setup_packet(host);

		if (xfer->tx_buf) {
			cnt = mdata->xfer_len / 4;
			iowrite32_rep(mdata->base + SPI_TX_DATA_REG,
				      xfer->tx_buf + mdata->num_xfered, cnt);

			remainder = mdata->xfer_len % 4;
			if (remainder > 0) {
				reg_val = 0;
				memcpy(&reg_val,
				       xfer->tx_buf + (cnt * 4) + mdata->num_xfered,
				       remainder);
				writel(reg_val, mdata->base + SPI_TX_DATA_REG);
			}
		}

		mtk_spi_enable_transfer(host);

		return IRQ_HANDLED;
	}

	/* DMA path: advance within the current scatterlist entries */
	if (mdata->tx_sgl)
		xfer->tx_dma += mdata->xfer_len;
	if (mdata->rx_sgl)
		xfer->rx_dma += mdata->xfer_len;

	if (mdata->tx_sgl && (mdata->tx_sgl_len == 0)) {
		/* current TX entry consumed; move to the next one */
		mdata->tx_sgl = sg_next(mdata->tx_sgl);
		if (mdata->tx_sgl) {
			xfer->tx_dma = sg_dma_address(mdata->tx_sgl);
			mdata->tx_sgl_len = sg_dma_len(mdata->tx_sgl);
		}
	}
	if (mdata->rx_sgl && (mdata->rx_sgl_len == 0)) {
		/* current RX entry consumed; move to the next one */
		mdata->rx_sgl = sg_next(mdata->rx_sgl);
		if (mdata->rx_sgl) {
			xfer->rx_dma = sg_dma_address(mdata->rx_sgl);
			mdata->rx_sgl_len = sg_dma_len(mdata->rx_sgl);
		}
	}

	if (!mdata->tx_sgl && !mdata->rx_sgl) {
		/* spi disable dma */
		cmd = readl(mdata->base + SPI_CMD_REG);
		cmd &= ~SPI_CMD_TX_DMA;
		cmd &= ~SPI_CMD_RX_DMA;
		writel(cmd, mdata->base + SPI_CMD_REG);

		spi_finalize_current_transfer(host);
		return IRQ_HANDLED;
	}

	/* more data pending: set up and kick the next DMA chunk */
	mtk_spi_update_mdata_len(host);
	mtk_spi_setup_packet(host);
	mtk_spi_setup_dma_addr(host, xfer);
	mtk_spi_enable_transfer(host);

	return IRQ_HANDLED;
}
881
mtk_spi_interrupt(int irq,void * dev_id)882 static irqreturn_t mtk_spi_interrupt(int irq, void *dev_id)
883 {
884 struct spi_controller *host = dev_id;
885 struct mtk_spi *mdata = spi_controller_get_devdata(host);
886 u32 reg_val;
887
888 reg_val = readl(mdata->base + SPI_STATUS0_REG);
889 if (reg_val & MTK_SPI_PAUSE_INT_STATUS)
890 mdata->state = MTK_SPI_PAUSED;
891 else
892 mdata->state = MTK_SPI_IDLE;
893
894 /* SPI-MEM ops */
895 if (mdata->use_spimem) {
896 complete(&mdata->spimem_done);
897 return IRQ_HANDLED;
898 }
899
900 return IRQ_WAKE_THREAD;
901 }
902
mtk_spi_mem_adjust_op_size(struct spi_mem * mem,struct spi_mem_op * op)903 static int mtk_spi_mem_adjust_op_size(struct spi_mem *mem,
904 struct spi_mem_op *op)
905 {
906 int opcode_len;
907
908 if (op->data.dir != SPI_MEM_NO_DATA) {
909 opcode_len = 1 + op->addr.nbytes + op->dummy.nbytes;
910 if (opcode_len + op->data.nbytes > MTK_SPI_IPM_PACKET_SIZE) {
911 op->data.nbytes = MTK_SPI_IPM_PACKET_SIZE - opcode_len;
912 /* force data buffer dma-aligned. */
913 op->data.nbytes -= op->data.nbytes % 4;
914 }
915 }
916
917 return 0;
918 }
919
mtk_spi_mem_supports_op(struct spi_mem * mem,const struct spi_mem_op * op)920 static bool mtk_spi_mem_supports_op(struct spi_mem *mem,
921 const struct spi_mem_op *op)
922 {
923 if (!spi_mem_default_supports_op(mem, op))
924 return false;
925
926 if (op->addr.nbytes && op->dummy.nbytes &&
927 op->addr.buswidth != op->dummy.buswidth)
928 return false;
929
930 if (op->addr.nbytes + op->dummy.nbytes > 16)
931 return false;
932
933 if (op->data.nbytes > MTK_SPI_IPM_PACKET_SIZE) {
934 if (op->data.nbytes / MTK_SPI_IPM_PACKET_SIZE >
935 MTK_SPI_IPM_PACKET_LOOP ||
936 op->data.nbytes % MTK_SPI_IPM_PACKET_SIZE != 0)
937 return false;
938 }
939
940 return true;
941 }
942
/*
 * mtk_spi_mem_setup_dma_xfer - program DMA addresses for a SPI-MEM op.
 * TX is always programmed (opcode/addr/dummy bytes go out); RX only for
 * data-in ops.  dma_ext parts take the upper bits in the *_REG_64 regs.
 */
static void mtk_spi_mem_setup_dma_xfer(struct spi_controller *host,
				       const struct spi_mem_op *op)
{
	struct mtk_spi *mdata = spi_controller_get_devdata(host);

	writel((u32)(mdata->tx_dma & MTK_SPI_32BITS_MASK),
	       mdata->base + SPI_TX_SRC_REG);
#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
	if (mdata->dev_comp->dma_ext)
		writel((u32)(mdata->tx_dma >> 32),
		       mdata->base + SPI_TX_SRC_REG_64);
#endif

	if (op->data.dir == SPI_MEM_DATA_IN) {
		writel((u32)(mdata->rx_dma & MTK_SPI_32BITS_MASK),
		       mdata->base + SPI_RX_DST_REG);
#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
		if (mdata->dev_comp->dma_ext)
			writel((u32)(mdata->rx_dma >> 32),
			       mdata->base + SPI_RX_DST_REG_64);
#endif
	}
}
966
/*
 * mtk_spi_transfer_wait - wait for the SPI-MEM completion signalled by
 * the interrupt handler.
 *
 * Budgets 8 SPI clock cycles per byte, then doubles that estimate and
 * adds one second of slack before declaring a timeout.
 *
 * Return: 0 on completion, -ETIMEDOUT if the interrupt never arrived.
 */
static int mtk_spi_transfer_wait(struct spi_mem *mem,
				 const struct spi_mem_op *op)
{
	struct mtk_spi *mdata = spi_controller_get_devdata(mem->spi->controller);
	/*
	 * For each byte we wait for 8 cycles of the SPI clock.
	 * Since speed is defined in Hz and we want milliseconds,
	 * so it should be 8 * 1000.
	 */
	u64 ms = 8000LL;

	if (op->data.dir == SPI_MEM_NO_DATA)
		ms *= 32; /* prevent we may get 0 for short transfers. */
	else
		ms *= op->data.nbytes;
	ms = div_u64(ms, mem->spi->max_speed_hz);
	ms += ms + 1000; /* double the estimate, plus 1s tolerance */

	/* msecs_to_jiffies() takes an unsigned int */
	if (ms > UINT_MAX)
		ms = UINT_MAX;

	if (!wait_for_completion_timeout(&mdata->spimem_done,
					 msecs_to_jiffies(ms))) {
		dev_err(mdata->dev, "spi-mem transfer timeout\n");
		return -ETIMEDOUT;
	}

	return 0;
}
996
/*
 * Execute a single spi-mem operation (opcode + optional address, dummy and
 * data phases) on an IPM-design controller using DMA for both directions.
 *
 * The TX payload (opcode/address/dummy bytes, plus the data for writes) is
 * staged in a freshly allocated DMA-able bounce buffer; RX data is bounced
 * too when the caller's buffer is not 4-byte aligned. Completion is signalled
 * by the transfer-done interrupt (see mtk_spi_transfer_wait()).
 *
 * Returns 0 on success or a negative errno (-ENOMEM, -ETIMEDOUT).
 */
static int mtk_spi_mem_exec_op(struct spi_mem *mem,
			       const struct spi_mem_op *op)
{
	struct mtk_spi *mdata = spi_controller_get_devdata(mem->spi->controller);
	u32 reg_val, nio, tx_size;
	char *tx_tmp_buf, *rx_tmp_buf;
	int ret = 0;

	/* Tell the IRQ handler this completion belongs to a spi-mem op. */
	mdata->use_spimem = true;
	reinit_completion(&mdata->spimem_done);

	/* Start from a clean FIFO/state machine and reapply timing. */
	mtk_spi_reset(mdata);
	mtk_spi_hw_init(mem->spi->controller, mem->spi);
	mtk_spi_prepare_transfer(mem->spi->controller, op->max_freq);

	reg_val = readl(mdata->base + SPI_CFG3_IPM_REG);
	/* opcode byte len */
	reg_val &= ~SPI_CFG3_IPM_CMD_BYTELEN_MASK;
	reg_val |= 1 << SPI_CFG3_IPM_CMD_BYTELEN_OFFSET;

	/* addr & dummy byte len */
	reg_val &= ~SPI_CFG3_IPM_ADDR_BYTELEN_MASK;
	if (op->addr.nbytes || op->dummy.nbytes)
		reg_val |= (op->addr.nbytes + op->dummy.nbytes) <<
			SPI_CFG3_IPM_ADDR_BYTELEN_OFFSET;

	/* data byte len */
	if (op->data.dir == SPI_MEM_NO_DATA) {
		reg_val |= SPI_CFG3_IPM_NODATA_FLAG;
		writel(0, mdata->base + SPI_CFG1_REG);
	} else {
		reg_val &= ~SPI_CFG3_IPM_NODATA_FLAG;
		mdata->xfer_len = op->data.nbytes;
		mtk_spi_setup_packet(mem->spi->controller);
	}

	/*
	 * "Extend mode": address/dummy phase runs single-wire even when the
	 * data phase is dual/quad (typical 1-1-N / 1-N-N op layouts).
	 */
	if (op->addr.nbytes || op->dummy.nbytes) {
		if (op->addr.buswidth == 1 || op->dummy.buswidth == 1)
			reg_val |= SPI_CFG3_IPM_XMODE_EN;
		else
			reg_val &= ~SPI_CFG3_IPM_XMODE_EN;
	}

	/* Widest buswidth used by any phase selects the pin mode. */
	if (op->addr.buswidth == 2 ||
	    op->dummy.buswidth == 2 ||
	    op->data.buswidth == 2)
		nio = 2;
	else if (op->addr.buswidth == 4 ||
		 op->dummy.buswidth == 4 ||
		 op->data.buswidth == 4)
		nio = 4;
	else
		nio = 1;

	reg_val &= ~SPI_CFG3_IPM_CMD_PIN_MODE_MASK;
	reg_val |= PIN_MODE_CFG(nio);

	/* spi-mem ops are always half-duplex; DIR bit picks RX vs TX. */
	reg_val |= SPI_CFG3_IPM_HALF_DUPLEX_EN;
	if (op->data.dir == SPI_MEM_DATA_IN)
		reg_val |= SPI_CFG3_IPM_HALF_DUPLEX_DIR;
	else
		reg_val &= ~SPI_CFG3_IPM_HALF_DUPLEX_DIR;
	writel(reg_val, mdata->base + SPI_CFG3_IPM_REG);

	/* 1 opcode byte + address + dummy (+ payload for writes). */
	tx_size = 1 + op->addr.nbytes + op->dummy.nbytes;
	if (op->data.dir == SPI_MEM_DATA_OUT)
		tx_size += op->data.nbytes;

	/* NOTE(review): hardware appears to need a minimum 32-byte TX DMA
	 * buffer; buffer is zero-filled beyond the real payload. */
	tx_size = max_t(u32, tx_size, 32);

	/* GFP_DMA: the DMA engine may not reach all of lowmem on some SoCs. */
	tx_tmp_buf = kzalloc(tx_size, GFP_KERNEL | GFP_DMA);
	if (!tx_tmp_buf) {
		mdata->use_spimem = false;
		return -ENOMEM;
	}

	tx_tmp_buf[0] = op->cmd.opcode;

	if (op->addr.nbytes) {
		int i;

		/* Serialize the address big-endian (MSB first on the wire). */
		for (i = 0; i < op->addr.nbytes; i++)
			tx_tmp_buf[i + 1] = op->addr.val >>
					(8 * (op->addr.nbytes - i - 1));
	}

	/* Dummy cycles are clocked out as 0xff filler bytes. */
	if (op->dummy.nbytes)
		memset(tx_tmp_buf + op->addr.nbytes + 1,
		       0xff,
		       op->dummy.nbytes);

	if (op->data.nbytes && op->data.dir == SPI_MEM_DATA_OUT)
		memcpy(tx_tmp_buf + op->dummy.nbytes + op->addr.nbytes + 1,
		       op->data.buf.out,
		       op->data.nbytes);

	mdata->tx_dma = dma_map_single(mdata->dev, tx_tmp_buf,
				       tx_size, DMA_TO_DEVICE);
	if (dma_mapping_error(mdata->dev, mdata->tx_dma)) {
		ret = -ENOMEM;
		goto err_exit;
	}

	if (op->data.dir == SPI_MEM_DATA_IN) {
		/* Bounce RX through a kernel buffer unless 4-byte aligned. */
		if (!IS_ALIGNED((size_t)op->data.buf.in, 4)) {
			rx_tmp_buf = kzalloc(op->data.nbytes,
					     GFP_KERNEL | GFP_DMA);
			if (!rx_tmp_buf) {
				ret = -ENOMEM;
				goto unmap_tx_dma;
			}
		} else {
			rx_tmp_buf = op->data.buf.in;
		}

		mdata->rx_dma = dma_map_single(mdata->dev,
					       rx_tmp_buf,
					       op->data.nbytes,
					       DMA_FROM_DEVICE);
		if (dma_mapping_error(mdata->dev, mdata->rx_dma)) {
			ret = -ENOMEM;
			goto kfree_rx_tmp_buf;
		}
	}

	/* Arm the DMA request lines, program src/dst, then kick the HW. */
	reg_val = readl(mdata->base + SPI_CMD_REG);
	reg_val |= SPI_CMD_TX_DMA;
	if (op->data.dir == SPI_MEM_DATA_IN)
		reg_val |= SPI_CMD_RX_DMA;
	writel(reg_val, mdata->base + SPI_CMD_REG);

	mtk_spi_mem_setup_dma_xfer(mem->spi->controller, op);

	mtk_spi_enable_transfer(mem->spi->controller);

	/* Wait for the interrupt. */
	ret = mtk_spi_transfer_wait(mem, op);
	if (ret)
		goto unmap_rx_dma;

	/* spi disable dma */
	reg_val = readl(mdata->base + SPI_CMD_REG);
	reg_val &= ~SPI_CMD_TX_DMA;
	if (op->data.dir == SPI_MEM_DATA_IN)
		reg_val &= ~SPI_CMD_RX_DMA;
	writel(reg_val, mdata->base + SPI_CMD_REG);

	/* Fall-through cleanup: labels below also serve the success path. */
unmap_rx_dma:
	if (op->data.dir == SPI_MEM_DATA_IN) {
		dma_unmap_single(mdata->dev, mdata->rx_dma,
				 op->data.nbytes, DMA_FROM_DEVICE);
		/* Copy bounced RX data back to the caller's buffer. */
		if (!IS_ALIGNED((size_t)op->data.buf.in, 4))
			memcpy(op->data.buf.in, rx_tmp_buf, op->data.nbytes);
	}
kfree_rx_tmp_buf:
	if (op->data.dir == SPI_MEM_DATA_IN &&
	    !IS_ALIGNED((size_t)op->data.buf.in, 4))
		kfree(rx_tmp_buf);
unmap_tx_dma:
	dma_unmap_single(mdata->dev, mdata->tx_dma,
			 tx_size, DMA_TO_DEVICE);
err_exit:
	kfree(tx_tmp_buf);
	mdata->use_spimem = false;

	return ret;
}
1164
/* spi-mem hooks, installed only on IPM-design controllers (see probe). */
static const struct spi_controller_mem_ops mtk_spi_mem_ops = {
	.adjust_op_size = mtk_spi_mem_adjust_op_size,
	.supports_op = mtk_spi_mem_supports_op,
	.exec_op = mtk_spi_mem_exec_op,
};
1170
/* Let the spi-mem core pass a per-operation clock frequency (op->max_freq). */
static const struct spi_controller_mem_caps mtk_spi_mem_caps = {
	.per_op_freq = true,
};
1174
mtk_spi_probe(struct platform_device * pdev)1175 static int mtk_spi_probe(struct platform_device *pdev)
1176 {
1177 struct device *dev = &pdev->dev;
1178 struct spi_controller *host;
1179 struct mtk_spi *mdata;
1180 int i, irq, ret, addr_bits;
1181
1182 host = devm_spi_alloc_host(dev, sizeof(*mdata));
1183 if (!host)
1184 return -ENOMEM;
1185
1186 host->auto_runtime_pm = true;
1187 host->dev.of_node = dev->of_node;
1188 host->mode_bits = SPI_CPOL | SPI_CPHA | SPI_LSB_FIRST;
1189
1190 host->set_cs = mtk_spi_set_cs;
1191 host->prepare_message = mtk_spi_prepare_message;
1192 host->unprepare_message = mtk_spi_unprepare_message;
1193 host->transfer_one = mtk_spi_transfer_one;
1194 host->can_dma = mtk_spi_can_dma;
1195 host->setup = mtk_spi_setup;
1196 host->set_cs_timing = mtk_spi_set_hw_cs_timing;
1197 host->use_gpio_descriptors = true;
1198
1199 mdata = spi_controller_get_devdata(host);
1200 mdata->dev_comp = device_get_match_data(dev);
1201
1202 if (mdata->dev_comp->enhance_timing)
1203 host->mode_bits |= SPI_CS_HIGH;
1204
1205 if (mdata->dev_comp->must_tx)
1206 host->flags = SPI_CONTROLLER_MUST_TX;
1207 if (mdata->dev_comp->ipm_design)
1208 host->mode_bits |= SPI_LOOP | SPI_RX_DUAL | SPI_TX_DUAL |
1209 SPI_RX_QUAD | SPI_TX_QUAD;
1210
1211 if (mdata->dev_comp->ipm_design) {
1212 mdata->dev = dev;
1213 host->mem_ops = &mtk_spi_mem_ops;
1214 host->mem_caps = &mtk_spi_mem_caps;
1215 init_completion(&mdata->spimem_done);
1216 }
1217
1218 if (mdata->dev_comp->need_pad_sel) {
1219 mdata->pad_num = of_property_count_u32_elems(dev->of_node,
1220 "mediatek,pad-select");
1221 if (mdata->pad_num < 0)
1222 return dev_err_probe(dev, -EINVAL,
1223 "No 'mediatek,pad-select' property\n");
1224
1225 mdata->pad_sel = devm_kmalloc_array(dev, mdata->pad_num,
1226 sizeof(u32), GFP_KERNEL);
1227 if (!mdata->pad_sel)
1228 return -ENOMEM;
1229
1230 for (i = 0; i < mdata->pad_num; i++) {
1231 of_property_read_u32_index(dev->of_node,
1232 "mediatek,pad-select",
1233 i, &mdata->pad_sel[i]);
1234 if (mdata->pad_sel[i] > MT8173_SPI_MAX_PAD_SEL)
1235 return dev_err_probe(dev, -EINVAL,
1236 "wrong pad-sel[%d]: %u\n",
1237 i, mdata->pad_sel[i]);
1238 }
1239 }
1240
1241 platform_set_drvdata(pdev, host);
1242 mdata->base = devm_platform_ioremap_resource(pdev, 0);
1243 if (IS_ERR(mdata->base))
1244 return PTR_ERR(mdata->base);
1245
1246 irq = platform_get_irq(pdev, 0);
1247 if (irq < 0)
1248 return irq;
1249
1250 if (!dev->dma_mask)
1251 dev->dma_mask = &dev->coherent_dma_mask;
1252
1253 if (mdata->dev_comp->ipm_design)
1254 dma_set_max_seg_size(dev, SZ_16M);
1255 else
1256 dma_set_max_seg_size(dev, SZ_256K);
1257
1258 mdata->parent_clk = devm_clk_get(dev, "parent-clk");
1259 if (IS_ERR(mdata->parent_clk))
1260 return dev_err_probe(dev, PTR_ERR(mdata->parent_clk),
1261 "failed to get parent-clk\n");
1262
1263 mdata->sel_clk = devm_clk_get(dev, "sel-clk");
1264 if (IS_ERR(mdata->sel_clk))
1265 return dev_err_probe(dev, PTR_ERR(mdata->sel_clk), "failed to get sel-clk\n");
1266
1267 mdata->spi_clk = devm_clk_get(dev, "spi-clk");
1268 if (IS_ERR(mdata->spi_clk))
1269 return dev_err_probe(dev, PTR_ERR(mdata->spi_clk), "failed to get spi-clk\n");
1270
1271 mdata->spi_hclk = devm_clk_get_optional(dev, "hclk");
1272 if (IS_ERR(mdata->spi_hclk))
1273 return dev_err_probe(dev, PTR_ERR(mdata->spi_hclk), "failed to get hclk\n");
1274
1275 ret = clk_set_parent(mdata->sel_clk, mdata->parent_clk);
1276 if (ret < 0)
1277 return dev_err_probe(dev, ret, "failed to clk_set_parent\n");
1278
1279 ret = clk_prepare_enable(mdata->spi_hclk);
1280 if (ret < 0)
1281 return dev_err_probe(dev, ret, "failed to enable hclk\n");
1282
1283 ret = clk_prepare_enable(mdata->spi_clk);
1284 if (ret < 0) {
1285 clk_disable_unprepare(mdata->spi_hclk);
1286 return dev_err_probe(dev, ret, "failed to enable spi_clk\n");
1287 }
1288
1289 mdata->spi_clk_hz = clk_get_rate(mdata->spi_clk);
1290
1291 if (mdata->dev_comp->no_need_unprepare) {
1292 clk_disable(mdata->spi_clk);
1293 clk_disable(mdata->spi_hclk);
1294 } else {
1295 clk_disable_unprepare(mdata->spi_clk);
1296 clk_disable_unprepare(mdata->spi_hclk);
1297 }
1298
1299 cpu_latency_qos_add_request(&mdata->qos_request, PM_QOS_DEFAULT_VALUE);
1300
1301 if (mdata->dev_comp->need_pad_sel) {
1302 if (mdata->pad_num != host->num_chipselect)
1303 return dev_err_probe(dev, -EINVAL,
1304 "pad_num does not match num_chipselect(%d != %d)\n",
1305 mdata->pad_num, host->num_chipselect);
1306
1307 if (!host->cs_gpiods && host->num_chipselect > 1)
1308 return dev_err_probe(dev, -EINVAL,
1309 "cs_gpios not specified and num_chipselect > 1\n");
1310 }
1311
1312 if (mdata->dev_comp->dma_ext)
1313 addr_bits = DMA_ADDR_EXT_BITS;
1314 else
1315 addr_bits = DMA_ADDR_DEF_BITS;
1316 ret = dma_set_mask(dev, DMA_BIT_MASK(addr_bits));
1317 if (ret)
1318 dev_notice(dev, "SPI dma_set_mask(%d) failed, ret:%d\n",
1319 addr_bits, ret);
1320
1321 ret = devm_request_threaded_irq(dev, irq, mtk_spi_interrupt,
1322 mtk_spi_interrupt_thread,
1323 IRQF_ONESHOT, dev_name(dev), host);
1324 if (ret)
1325 return dev_err_probe(dev, ret, "failed to register irq\n");
1326
1327 pm_runtime_enable(dev);
1328
1329 ret = devm_spi_register_controller(dev, host);
1330 if (ret) {
1331 pm_runtime_disable(dev);
1332 return dev_err_probe(dev, ret, "failed to register host\n");
1333 }
1334
1335 return 0;
1336 }
1337
/*
 * Remove: drop the PM QoS request, unblock any in-flight spi-mem waiter,
 * then resume the hardware (if possible) to reset it and release the
 * clock "prepare" refs that no_need_unprepare controllers keep held.
 */
static void mtk_spi_remove(struct platform_device *pdev)
{
	struct spi_controller *host = platform_get_drvdata(pdev);
	struct mtk_spi *mdata = spi_controller_get_devdata(host);
	int ret;

	cpu_latency_qos_remove_request(&mdata->qos_request);
	/* Don't leave mtk_spi_transfer_wait() stuck during teardown. */
	if (mdata->use_spimem && !completion_done(&mdata->spimem_done))
		complete(&mdata->spimem_done);

	ret = pm_runtime_get_sync(&pdev->dev);
	if (ret < 0) {
		dev_warn(&pdev->dev, "Failed to resume hardware (%pe)\n", ERR_PTR(ret));
	} else {
		/*
		 * If pm runtime resume failed, clks are disabled and
		 * unprepared. So don't access the hardware and skip clk
		 * unpreparing.
		 */
		mtk_spi_reset(mdata);

		/* Balance the clk_prepare() kept since probe (gated only). */
		if (mdata->dev_comp->no_need_unprepare) {
			clk_unprepare(mdata->spi_clk);
			clk_unprepare(mdata->spi_hclk);
		}
	}

	/* Balance pm_runtime_get_sync() without triggering suspend. */
	pm_runtime_put_noidle(&pdev->dev);
	pm_runtime_disable(&pdev->dev);
}
1368
1369 #ifdef CONFIG_PM_SLEEP
mtk_spi_suspend(struct device * dev)1370 static int mtk_spi_suspend(struct device *dev)
1371 {
1372 int ret;
1373 struct spi_controller *host = dev_get_drvdata(dev);
1374 struct mtk_spi *mdata = spi_controller_get_devdata(host);
1375
1376 ret = spi_controller_suspend(host);
1377 if (ret)
1378 return ret;
1379
1380 if (!pm_runtime_suspended(dev)) {
1381 clk_disable_unprepare(mdata->spi_clk);
1382 clk_disable_unprepare(mdata->spi_hclk);
1383 }
1384
1385 pinctrl_pm_select_sleep_state(dev);
1386
1387 return 0;
1388 }
1389
/*
 * System resume: restore the pin state, re-enable clocks when the device
 * was active before suspend, then resume the SPI core. Each step rolls
 * back the previous one on failure.
 */
static int mtk_spi_resume(struct device *dev)
{
	int ret;
	struct spi_controller *host = dev_get_drvdata(dev);
	struct mtk_spi *mdata = spi_controller_get_devdata(host);

	pinctrl_pm_select_default_state(dev);

	/* Mirror of the suspend path: only re-enable if we disabled there. */
	if (!pm_runtime_suspended(dev)) {
		ret = clk_prepare_enable(mdata->spi_clk);
		if (ret < 0) {
			dev_err(dev, "failed to enable spi_clk (%d)\n", ret);
			return ret;
		}

		ret = clk_prepare_enable(mdata->spi_hclk);
		if (ret < 0) {
			dev_err(dev, "failed to enable spi_hclk (%d)\n", ret);
			clk_disable_unprepare(mdata->spi_clk);
			return ret;
		}
	}

	ret = spi_controller_resume(host);
	if (ret < 0) {
		/* Undo the clock enables if the core refuses to resume. */
		clk_disable_unprepare(mdata->spi_clk);
		clk_disable_unprepare(mdata->spi_hclk);
	}

	return ret;
}
1421 #endif /* CONFIG_PM_SLEEP */
1422
1423 #ifdef CONFIG_PM
mtk_spi_runtime_suspend(struct device * dev)1424 static int mtk_spi_runtime_suspend(struct device *dev)
1425 {
1426 struct spi_controller *host = dev_get_drvdata(dev);
1427 struct mtk_spi *mdata = spi_controller_get_devdata(host);
1428
1429 if (mdata->dev_comp->no_need_unprepare) {
1430 clk_disable(mdata->spi_clk);
1431 clk_disable(mdata->spi_hclk);
1432 } else {
1433 clk_disable_unprepare(mdata->spi_clk);
1434 clk_disable_unprepare(mdata->spi_hclk);
1435 }
1436
1437 return 0;
1438 }
1439
/*
 * Runtime resume: ungate the clocks, in the variant matching how runtime
 * suspend gated them (clk_enable vs. clk_prepare_enable). On a partial
 * failure the already-enabled spi_clk is rolled back.
 */
static int mtk_spi_runtime_resume(struct device *dev)
{
	struct spi_controller *host = dev_get_drvdata(dev);
	struct mtk_spi *mdata = spi_controller_get_devdata(host);
	int ret;

	if (mdata->dev_comp->no_need_unprepare) {
		/* Clocks stayed prepared since probe; just ungate them. */
		ret = clk_enable(mdata->spi_clk);
		if (ret < 0) {
			dev_err(dev, "failed to enable spi_clk (%d)\n", ret);
			return ret;
		}
		ret = clk_enable(mdata->spi_hclk);
		if (ret < 0) {
			dev_err(dev, "failed to enable spi_hclk (%d)\n", ret);
			clk_disable(mdata->spi_clk);
			return ret;
		}
	} else {
		ret = clk_prepare_enable(mdata->spi_clk);
		if (ret < 0) {
			dev_err(dev, "failed to prepare_enable spi_clk (%d)\n", ret);
			return ret;
		}

		ret = clk_prepare_enable(mdata->spi_hclk);
		if (ret < 0) {
			dev_err(dev, "failed to prepare_enable spi_hclk (%d)\n", ret);
			clk_disable_unprepare(mdata->spi_clk);
			return ret;
		}
	}

	return 0;
}
1475 #endif /* CONFIG_PM */
1476
/* System sleep and runtime PM callbacks (see the handlers above). */
static const struct dev_pm_ops mtk_spi_pm = {
	SET_SYSTEM_SLEEP_PM_OPS(mtk_spi_suspend, mtk_spi_resume)
	SET_RUNTIME_PM_OPS(mtk_spi_runtime_suspend,
			   mtk_spi_runtime_resume, NULL)
};
1482
/* Platform driver glue; matched via mtk_spi_of_match (defined above). */
static struct platform_driver mtk_spi_driver = {
	.driver = {
		.name = "mtk-spi",
		.pm = &mtk_spi_pm,
		.of_match_table = mtk_spi_of_match,
	},
	.probe = mtk_spi_probe,
	.remove = mtk_spi_remove,
};

module_platform_driver(mtk_spi_driver);

MODULE_DESCRIPTION("MTK SPI Controller driver");
MODULE_AUTHOR("Leilk Liu <leilk.liu@mediatek.com>");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS("platform:mtk-spi");
1499