xref: /linux/drivers/spi/spi-stm32-ospi.c (revision 1260ed77798502de9c98020040d2995008de10cc)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Copyright (C) STMicroelectronics 2025 - All Rights Reserved
4  */
5 
6 #include <linux/bitfield.h>
7 #include <linux/clk.h>
8 #include <linux/delay.h>
9 #include <linux/dma-mapping.h>
10 #include <linux/dmaengine.h>
11 #include <linux/err.h>
12 #include <linux/errno.h>
13 #include <linux/gpio/consumer.h>
14 #include <linux/interrupt.h>
15 #include <linux/io.h>
16 #include <linux/iopoll.h>
17 #include <linux/mfd/syscon.h>
18 #include <linux/module.h>
19 #include <linux/mutex.h>
20 #include <linux/of.h>
21 #include <linux/of_address.h>
22 #include <linux/of_device.h>
23 #include <linux/of_reserved_mem.h>
24 #include <linux/pinctrl/consumer.h>
25 #include <linux/platform_device.h>
26 #include <linux/pm_runtime.h>
27 #include <linux/reset.h>
28 #include <linux/sizes.h>
29 #include <linux/spi/spi-mem.h>
30 #include <linux/types.h>
31 
32 #define OSPI_CR			0x00
33 #define CR_EN			BIT(0)
34 #define CR_ABORT		BIT(1)
35 #define CR_DMAEN		BIT(2)
36 #define CR_FTHRES_SHIFT		8
37 #define CR_TEIE			BIT(16)
38 #define CR_TCIE			BIT(17)
39 #define CR_SMIE			BIT(19)
40 #define CR_APMS			BIT(22)
41 #define CR_CSSEL		BIT(24)
42 #define CR_FMODE_MASK		GENMASK(29, 28)
43 #define CR_FMODE_INDW		(0U)
44 #define CR_FMODE_INDR		(1U)
45 #define CR_FMODE_APM		(2U)
46 #define CR_FMODE_MM		(3U)
47 
48 #define OSPI_DCR1		0x08
49 #define DCR1_DLYBYP		BIT(3)
50 #define DCR1_DEVSIZE_MASK	GENMASK(20, 16)
51 #define DCR1_MTYP_MASK		GENMASK(26, 24)
52 #define DCR1_MTYP_MX_MODE	1
53 #define DCR1_MTYP_HP_MEMMODE	4
54 
55 #define OSPI_DCR2		0x0c
56 #define DCR2_PRESC_MASK		GENMASK(7, 0)
57 
58 #define OSPI_SR			0x20
59 #define SR_TEF			BIT(0)
60 #define SR_TCF			BIT(1)
61 #define SR_FTF			BIT(2)
62 #define SR_SMF			BIT(3)
63 #define SR_BUSY			BIT(5)
64 
65 #define OSPI_FCR		0x24
66 #define FCR_CTEF		BIT(0)
67 #define FCR_CTCF		BIT(1)
68 #define FCR_CSMF		BIT(3)
69 
70 #define OSPI_DLR		0x40
71 #define OSPI_AR			0x48
72 #define OSPI_DR			0x50
73 #define OSPI_PSMKR		0x80
74 #define OSPI_PSMAR		0x88
75 
76 #define OSPI_CCR		0x100
77 #define CCR_IMODE_MASK		GENMASK(2, 0)
78 #define CCR_IDTR		BIT(3)
79 #define CCR_ISIZE_MASK		GENMASK(5, 4)
80 #define CCR_ADMODE_MASK		GENMASK(10, 8)
81 #define CCR_ADMODE_8LINES	4
82 #define CCR_ADDTR		BIT(11)
83 #define CCR_ADSIZE_MASK		GENMASK(13, 12)
84 #define CCR_ADSIZE_32BITS	3
85 #define CCR_DMODE_MASK		GENMASK(26, 24)
86 #define CCR_DMODE_8LINES	4
87 #define CCR_DQSE		BIT(29)
88 #define CCR_DDTR		BIT(27)
89 #define CCR_BUSWIDTH_0		0x0
90 #define CCR_BUSWIDTH_1		0x1
91 #define CCR_BUSWIDTH_2		0x2
92 #define CCR_BUSWIDTH_4		0x3
93 #define CCR_BUSWIDTH_8		0x4
94 
95 #define OSPI_TCR		0x108
96 #define TCR_DCYC_MASK		GENMASK(4, 0)
97 #define TCR_DHQC		BIT(28)
98 #define TCR_SSHIFT		BIT(30)
99 
100 #define OSPI_IR			0x110
101 
102 #define STM32_OSPI_MAX_MMAP_SZ	SZ_256M
103 #define STM32_OSPI_MAX_NORCHIP	2
104 
105 #define STM32_FIFO_TIMEOUT_US		30000
106 #define STM32_ABT_TIMEOUT_US		100000
107 #define STM32_COMP_TIMEOUT_MS		5000
108 #define STM32_BUSY_TIMEOUT_US		100000
109 
110 
111 #define STM32_AUTOSUSPEND_DELAY -1
112 
113 struct stm32_ospi {
114 	struct device *dev;
115 	struct spi_controller *ctrl;
116 	struct clk *clk;
117 	struct reset_control *rstc;
118 
119 	struct completion data_completion;
120 	struct completion match_completion;
121 
122 	struct dma_chan *dma_chtx;
123 	struct dma_chan *dma_chrx;
124 	struct completion dma_completion;
125 
126 	void __iomem *regs_base;
127 	void __iomem *mm_base;
128 	phys_addr_t regs_phys_base;
129 	resource_size_t mm_size;
130 	u32 clk_rate;
131 	u32 fmode;
132 	u32 cr_reg;
133 	u32 dcr_reg;
134 	u32 flash_presc[STM32_OSPI_MAX_NORCHIP];
135 	int irq;
136 	unsigned long status_timeout;
137 
138 	/*
139 	 * Protects the device configuration, which may differ between
140 	 * accesses to the two flash devices
141 	 */
142 	struct mutex lock;
143 };
144 
145 static void stm32_ospi_read_fifo(u8 *val, void __iomem *addr)
146 {
147 	*val = readb_relaxed(addr);
148 }
149 
150 static void stm32_ospi_write_fifo(u8 *val, void __iomem *addr)
151 {
152 	writeb_relaxed(*val, addr);
153 }
154 
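/*
 * Request an abort of the ongoing operation and wait for the hardware to
 * clear the abort bit again.
 */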
155 static int stm32_ospi_abort(struct stm32_ospi *ospi)
156 {
157 	void __iomem *regs_base = ospi->regs_base;
158 	u32 cr;
159 	int timeout;
160 
161 	cr = readl_relaxed(regs_base + OSPI_CR) | CR_ABORT;
162 	writel_relaxed(cr, regs_base + OSPI_CR);
163 
164 	/* wait for the abort bit to be cleared by hardware */
165 	timeout = readl_relaxed_poll_timeout_atomic(regs_base + OSPI_CR,
166 						    cr, !(cr & CR_ABORT), 1,
167 						    STM32_ABT_TIMEOUT_US);
168 
169 	if (timeout)
170 		dev_err(ospi->dev, "%s abort timeout:%d\n", __func__, timeout);
171 
172 	return timeout;
173 }
174 
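/*
 * Transfer data byte per byte through the FIFO: wait for the FIFO
 * threshold flag before each access to the data register.
 */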
175 static int stm32_ospi_poll(struct stm32_ospi *ospi, u8 *buf, u32 len, bool read)
176 {
177 	void __iomem *regs_base = ospi->regs_base;
178 	void (*fifo)(u8 *val, void __iomem *addr);
179 	u32 sr;
180 	int ret;
181 
182 	if (read)
183 		fifo = stm32_ospi_read_fifo;
184 	else
185 		fifo = stm32_ospi_write_fifo;
186 
187 	while (len--) {
188 		ret = readl_relaxed_poll_timeout_atomic(regs_base + OSPI_SR,
189 							sr, sr & SR_FTF, 1,
190 							STM32_FIFO_TIMEOUT_US);
191 		if (ret) {
192 			dev_err(ospi->dev, "fifo timeout (len:%d stat:%#x)\n",
193 				len, sr);
194 			return ret;
195 		}
196 		fifo(buf++, regs_base + OSPI_DR);
197 	}
198 
199 	return 0;
200 }
201 
202 static int stm32_ospi_wait_nobusy(struct stm32_ospi *ospi)
203 {
204 	u32 sr;
205 
206 	return readl_relaxed_poll_timeout_atomic(ospi->regs_base + OSPI_SR,
207 						 sr, !(sr & SR_BUSY), 1,
208 						 STM32_BUSY_TIMEOUT_US);
209 }
210 
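/*
 * Wait for the end of the current command: if the transfer-complete flag
 * is not already set (and automatic-polling mode is not used), enable the
 * TCIE/TEIE interrupts and wait for the completion, then make sure the
 * controller is no longer busy.
 */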
211 static int stm32_ospi_wait_cmd(struct stm32_ospi *ospi)
212 {
213 	void __iomem *regs_base = ospi->regs_base;
214 	u32 cr, sr;
215 	int err = 0;
216 
217 	if ((readl_relaxed(regs_base + OSPI_SR) & SR_TCF) ||
218 	    ospi->fmode == CR_FMODE_APM)
219 		goto out;
220 
221 	reinit_completion(&ospi->data_completion);
222 	cr = readl_relaxed(regs_base + OSPI_CR);
223 	writel_relaxed(cr | CR_TCIE | CR_TEIE, regs_base + OSPI_CR);
224 
225 	if (!wait_for_completion_timeout(&ospi->data_completion,
226 				msecs_to_jiffies(STM32_COMP_TIMEOUT_MS)))
227 		err = -ETIMEDOUT;
228 
229 	sr = readl_relaxed(regs_base + OSPI_SR);
230 	if (sr & SR_TCF)
231 		/* avoid false timeout */
232 		err = 0;
233 	if (sr & SR_TEF)
234 		err = -EIO;
235 
236 out:
237 	/* clear flags */
238 	writel_relaxed(FCR_CTCF | FCR_CTEF, regs_base + OSPI_FCR);
239 
240 	if (!err)
241 		err = stm32_ospi_wait_nobusy(ospi);
242 
243 	return err;
244 }
245 
246 static void stm32_ospi_dma_callback(void *arg)
247 {
248 	struct completion *dma_completion = arg;
249 
250 	complete(dma_completion);
251 }
252 
253 static irqreturn_t stm32_ospi_irq(int irq, void *dev_id)
254 {
255 	struct stm32_ospi *ospi = (struct stm32_ospi *)dev_id;
256 	void __iomem *regs_base = ospi->regs_base;
257 	u32 cr, sr;
258 
259 	cr = readl_relaxed(regs_base + OSPI_CR);
260 	sr = readl_relaxed(regs_base + OSPI_SR);
261 
262 	if (cr & CR_SMIE && sr & SR_SMF) {
263 		/* disable irq */
264 		cr &= ~CR_SMIE;
265 		writel_relaxed(cr, regs_base + OSPI_CR);
266 		complete(&ospi->match_completion);
267 
268 		return IRQ_HANDLED;
269 	}
270 
271 	if (sr & (SR_TEF | SR_TCF)) {
272 		/* disable irq */
273 		cr &= ~CR_TCIE & ~CR_TEIE;
274 		writel_relaxed(cr, regs_base + OSPI_CR);
275 		complete(&ospi->data_completion);
276 	}
277 
278 	return IRQ_HANDLED;
279 }
280 
281 static void stm32_ospi_dma_setup(struct stm32_ospi *ospi,
282 			 struct dma_slave_config *dma_cfg)
283 {
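	/*
	 * If a channel cannot be configured, release it so that transfers in
	 * that direction fall back to FIFO polling.
	 */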
284 	if (dma_cfg && ospi->dma_chrx) {
285 		if (dmaengine_slave_config(ospi->dma_chrx, dma_cfg)) {
286 			dev_err(ospi->dev, "dma rx config failed\n");
287 			dma_release_channel(ospi->dma_chrx);
288 			ospi->dma_chrx = NULL;
289 		}
290 	}
291 
292 	if (dma_cfg && ospi->dma_chtx) {
293 		if (dmaengine_slave_config(ospi->dma_chtx, dma_cfg)) {
294 			dev_err(ospi->dev, "dma tx config failed\n");
295 			dma_release_channel(ospi->dma_chtx);
296 			ospi->dma_chtx = NULL;
297 		}
298 	}
299 
300 	init_completion(&ospi->dma_completion);
301 }
302 
303 static int stm32_ospi_tx_mm(struct stm32_ospi *ospi,
304 			    const struct spi_mem_op *op)
305 {
306 	memcpy_fromio(op->data.buf.in, ospi->mm_base + op->addr.val,
307 		      op->data.nbytes);
308 	return 0;
309 }
310 
311 static int stm32_ospi_tx_dma(struct stm32_ospi *ospi,
312 			     const struct spi_mem_op *op)
313 {
314 	struct dma_async_tx_descriptor *desc;
315 	void __iomem *regs_base = ospi->regs_base;
316 	enum dma_transfer_direction dma_dir;
317 	struct dma_chan *dma_ch;
318 	struct sg_table sgt;
319 	dma_cookie_t cookie;
320 	u32 cr, t_out;
321 	int err;
322 
323 	if (op->data.dir == SPI_MEM_DATA_IN) {
324 		dma_dir = DMA_DEV_TO_MEM;
325 		dma_ch = ospi->dma_chrx;
326 	} else {
327 		dma_dir = DMA_MEM_TO_DEV;
328 		dma_ch = ospi->dma_chtx;
329 	}
330 
331 	/*
332 	 * spi_map_buf() returns -EINVAL if the buffer is not DMA-able
333 	 * (DMA-able: in vmalloc | kmap | virt_addr_valid)
334 	 */
335 	err = spi_controller_dma_map_mem_op_data(ospi->ctrl, op, &sgt);
336 	if (err)
337 		return err;
338 
339 	desc = dmaengine_prep_slave_sg(dma_ch, sgt.sgl, sgt.nents,
340 				       dma_dir, DMA_PREP_INTERRUPT);
341 	if (!desc) {
342 		err = -ENOMEM;
343 		goto out_unmap;
344 	}
345 
346 	cr = readl_relaxed(regs_base + OSPI_CR);
347 
348 	reinit_completion(&ospi->dma_completion);
349 	desc->callback = stm32_ospi_dma_callback;
350 	desc->callback_param = &ospi->dma_completion;
351 	cookie = dmaengine_submit(desc);
352 	err = dma_submit_error(cookie);
353 	if (err)
354 		goto out;
355 
356 	dma_async_issue_pending(dma_ch);
357 
358 	writel_relaxed(cr | CR_DMAEN, regs_base + OSPI_CR);
359 
360 	t_out = sgt.nents * STM32_COMP_TIMEOUT_MS;
361 	if (!wait_for_completion_timeout(&ospi->dma_completion,
362 					 msecs_to_jiffies(t_out)))
363 		err = -ETIMEDOUT;
364 
365 	if (err)
366 		dmaengine_terminate_all(dma_ch);
367 
368 out:
369 	writel_relaxed(cr & ~CR_DMAEN, regs_base + OSPI_CR);
370 out_unmap:
371 	spi_controller_dma_unmap_mem_op_data(ospi->ctrl, op, &sgt);
372 
373 	return err;
374 }
375 
376 static int stm32_ospi_xfer(struct stm32_ospi *ospi, const struct spi_mem_op *op)
377 {
378 	u8 *buf;
379 
380 	if (!op->data.nbytes)
381 		return 0;
382 
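	/*
	 * Memory-mapped reads are served directly from the mapped region;
	 * otherwise use DMA for transfers larger than 8 bytes when a channel
	 * is available, and fall back to FIFO polling in the remaining cases.
	 */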
383 	if (ospi->fmode == CR_FMODE_MM)
384 		return stm32_ospi_tx_mm(ospi, op);
385 	else if (((op->data.dir == SPI_MEM_DATA_IN && ospi->dma_chrx) ||
386 		 (op->data.dir == SPI_MEM_DATA_OUT && ospi->dma_chtx)) &&
387 		  op->data.nbytes > 8)
388 		if (!stm32_ospi_tx_dma(ospi, op))
389 			return 0;
390 
391 	if (op->data.dir == SPI_MEM_DATA_IN)
392 		buf = op->data.buf.in;
393 	else
394 		buf = (u8 *)op->data.buf.out;
395 
396 	return stm32_ospi_poll(ospi, buf, op->data.nbytes,
397 			       op->data.dir == SPI_MEM_DATA_IN);
398 }
399 
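/*
 * In automatic-polling mode, enable the status-match interrupt and wait
 * for SR_SMF within the timeout provided by the spi-mem core, then clear
 * the flag.
 */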
400 static int stm32_ospi_wait_poll_status(struct stm32_ospi *ospi,
401 				       const struct spi_mem_op *op)
402 {
403 	void __iomem *regs_base = ospi->regs_base;
404 	u32 cr;
405 
406 	reinit_completion(&ospi->match_completion);
407 	cr = readl_relaxed(regs_base + OSPI_CR);
408 	writel_relaxed(cr | CR_SMIE, regs_base + OSPI_CR);
409 
410 	if (!wait_for_completion_timeout(&ospi->match_completion,
411 					 msecs_to_jiffies(ospi->status_timeout))) {
412 		u32 sr = readl_relaxed(regs_base + OSPI_SR);
413 
414 		/* Avoid false timeout */
415 		if (!(sr & SR_SMF))
416 			return -ETIMEDOUT;
417 	}
418 
419 	writel_relaxed(FCR_CSMF, regs_base + OSPI_FCR);
420 
421 	return 0;
422 }
423 
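/*
 * Map a spi-mem buswidth to the CCR IMODE/ADMODE/DMODE encoding: 1-line
 * and 2-line accesses already match the register encoding, while 4-line
 * and 8-line accesses need dedicated values.
 */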
424 static int stm32_ospi_get_mode(u8 buswidth)
425 {
426 	switch (buswidth) {
427 	case 8:
428 		return CCR_BUSWIDTH_8;
429 	case 4:
430 		return CCR_BUSWIDTH_4;
431 	default:
432 		return buswidth;
433 	}
434 }
435 
436 static int stm32_ospi_send(struct spi_device *spi, const struct spi_mem_op *op)
437 {
438 	struct stm32_ospi *ospi = spi_controller_get_devdata(spi->controller);
439 	void __iomem *regs_base = ospi->regs_base;
440 	u32 ccr, cr, dcr2, tcr;
441 	int timeout, err = 0, err_poll_status = 0;
442 	u8 cs = spi->chip_select[ffs(spi->cs_index_mask) - 1];
443 
444 	dev_dbg(ospi->dev, "cmd:%#x mode:%d.%d.%d.%d addr:%#llx len:%#x\n",
445 		op->cmd.opcode, op->cmd.buswidth, op->addr.buswidth,
446 		op->dummy.buswidth, op->data.buswidth,
447 		op->addr.val, op->data.nbytes);
448 
449 	cr = readl_relaxed(ospi->regs_base + OSPI_CR);
450 	cr &= ~CR_CSSEL;
451 	cr |= FIELD_PREP(CR_CSSEL, cs);
452 	cr &= ~CR_FMODE_MASK;
453 	cr |= FIELD_PREP(CR_FMODE_MASK, ospi->fmode);
454 	writel_relaxed(cr, regs_base + OSPI_CR);
455 
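	/* DLR holds the number of data bytes to transfer, minus one */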
456 	if (op->data.nbytes)
457 		writel_relaxed(op->data.nbytes - 1, regs_base + OSPI_DLR);
458 
459 	/* set prescaler */
460 	dcr2 = readl_relaxed(regs_base + OSPI_DCR2);
461 	dcr2 |= FIELD_PREP(DCR2_PRESC_MASK, ospi->flash_presc[cs]);
462 	writel_relaxed(dcr2, regs_base + OSPI_DCR2);
463 
464 	ccr = FIELD_PREP(CCR_IMODE_MASK, stm32_ospi_get_mode(op->cmd.buswidth));
465 
466 	if (op->addr.nbytes) {
467 		ccr |= FIELD_PREP(CCR_ADMODE_MASK,
468 				  stm32_ospi_get_mode(op->addr.buswidth));
469 		ccr |= FIELD_PREP(CCR_ADSIZE_MASK, op->addr.nbytes - 1);
470 	}
471 
472 	tcr = TCR_SSHIFT;
473 	if (op->dummy.buswidth && op->dummy.nbytes) {
474 		tcr |= FIELD_PREP(TCR_DCYC_MASK,
475 				  op->dummy.nbytes * 8 / op->dummy.buswidth);
476 	}
477 	writel_relaxed(tcr, regs_base + OSPI_TCR);
478 
479 	if (op->data.nbytes) {
480 		ccr |= FIELD_PREP(CCR_DMODE_MASK,
481 				  stm32_ospi_get_mode(op->data.buswidth));
482 	}
483 
484 	writel_relaxed(ccr, regs_base + OSPI_CCR);
485 
486 	/* set instruction; it must be written after the CCR register update */
487 	writel_relaxed(op->cmd.opcode, regs_base + OSPI_IR);
488 
489 	if (op->addr.nbytes && ospi->fmode != CR_FMODE_MM)
490 		writel_relaxed(op->addr.val, regs_base + OSPI_AR);
491 
492 	if (ospi->fmode == CR_FMODE_APM)
493 		err_poll_status = stm32_ospi_wait_poll_status(ospi, op);
494 
495 	err = stm32_ospi_xfer(ospi, op);
496 
497 	/*
498 	 * Abort in:
499 	 * - error case
500 	 * - memory-mapped read: prefetching must be stopped if we read the
501 	 *   last bytes of the device (device size - fifo size). As the device
502 	 *   size is not known, prefetching is always stopped.
503 	 */
504 	if (err || err_poll_status || ospi->fmode == CR_FMODE_MM)
505 		goto abort;
506 
507 	/* Wait for the end of the transfer in indirect mode */
508 	err = stm32_ospi_wait_cmd(ospi);
509 	if (err)
510 		goto abort;
511 
512 	return 0;
513 
514 abort:
515 	timeout = stm32_ospi_abort(ospi);
516 	writel_relaxed(FCR_CTCF | FCR_CSMF, regs_base + OSPI_FCR);
517 
518 	if (err || err_poll_status || timeout)
519 		dev_err(ospi->dev, "%s err:%d err_poll_status:%d abort timeout:%d\n",
520 			__func__, err, err_poll_status, timeout);
521 
522 	return err;
523 }
524 
525 static int stm32_ospi_poll_status(struct spi_mem *mem,
526 				  const struct spi_mem_op *op,
527 				  u16 mask, u16 match,
528 				  unsigned long initial_delay_us,
529 				  unsigned long polling_rate_us,
530 				  unsigned long timeout_ms)
531 {
532 	struct stm32_ospi *ospi = spi_controller_get_devdata(mem->spi->controller);
533 	void __iomem *regs_base = ospi->regs_base;
534 	int ret;
535 
536 	ret = pm_runtime_resume_and_get(ospi->dev);
537 	if (ret < 0)
538 		return ret;
539 
540 	mutex_lock(&ospi->lock);
541 
542 	writel_relaxed(mask, regs_base + OSPI_PSMKR);
543 	writel_relaxed(match, regs_base + OSPI_PSMAR);
544 	ospi->fmode = CR_FMODE_APM;
545 	ospi->status_timeout = timeout_ms;
546 
547 	ret = stm32_ospi_send(mem->spi, op);
548 	mutex_unlock(&ospi->lock);
549 
550 	pm_runtime_mark_last_busy(ospi->dev);
551 	pm_runtime_put_autosuspend(ospi->dev);
552 
553 	return ret;
554 }
555 
556 static int stm32_ospi_exec_op(struct spi_mem *mem, const struct spi_mem_op *op)
557 {
558 	struct stm32_ospi *ospi = spi_controller_get_devdata(mem->spi->controller);
559 	int ret;
560 
561 	ret = pm_runtime_resume_and_get(ospi->dev);
562 	if (ret < 0)
563 		return ret;
564 
565 	mutex_lock(&ospi->lock);
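	/* Select indirect read or indirect write depending on the data direction */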
566 	if (op->data.dir == SPI_MEM_DATA_IN && op->data.nbytes)
567 		ospi->fmode = CR_FMODE_INDR;
568 	else
569 		ospi->fmode = CR_FMODE_INDW;
570 
571 	ret = stm32_ospi_send(mem->spi, op);
572 	mutex_unlock(&ospi->lock);
573 
574 	pm_runtime_mark_last_busy(ospi->dev);
575 	pm_runtime_put_autosuspend(ospi->dev);
576 
577 	return ret;
578 }
579 
580 static int stm32_ospi_dirmap_create(struct spi_mem_dirmap_desc *desc)
581 {
582 	struct stm32_ospi *ospi = spi_controller_get_devdata(desc->mem->spi->controller);
583 
584 	if (desc->info.op_tmpl.data.dir == SPI_MEM_DATA_OUT)
585 		return -EOPNOTSUPP;
586 
587 	/* Should never happen, as mm_base == null is an error probe exit condition */
588 	if (!ospi->mm_base && desc->info.op_tmpl.data.dir == SPI_MEM_DATA_IN)
589 		return -EOPNOTSUPP;
590 
591 	if (!ospi->mm_size)
592 		return -EOPNOTSUPP;
593 
594 	return 0;
595 }
596 
597 static ssize_t stm32_ospi_dirmap_read(struct spi_mem_dirmap_desc *desc,
598 				      u64 offs, size_t len, void *buf)
599 {
600 	struct stm32_ospi *ospi = spi_controller_get_devdata(desc->mem->spi->controller);
601 	struct spi_mem_op op;
602 	u32 addr_max;
603 	int ret;
604 
605 	ret = pm_runtime_resume_and_get(ospi->dev);
606 	if (ret < 0)
607 		return ret;
608 
609 	mutex_lock(&ospi->lock);
610 	/*
611 	 * Make a local copy of desc->info.op_tmpl and complete the dirmap
612 	 * spi_mem_op template with offs, len and *buf in order to gather
613 	 * all needed transfer information into struct spi_mem_op
614 	 */
615 	memcpy(&op, &desc->info.op_tmpl, sizeof(struct spi_mem_op));
616 	dev_dbg(ospi->dev, "%s len = 0x%zx offs = 0x%llx buf = 0x%p\n", __func__, len, offs, buf);
617 
618 	op.data.nbytes = len;
619 	op.addr.val = desc->info.offset + offs;
620 	op.data.buf.in = buf;
621 
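	/*
	 * Use the memory-mapped region when an address phase is defined and
	 * the whole access fits inside it; otherwise fall back to an
	 * indirect read.
	 */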
622 	addr_max = op.addr.val + op.data.nbytes + 1;
623 	if (addr_max < ospi->mm_size && op.addr.buswidth)
624 		ospi->fmode = CR_FMODE_MM;
625 	else
626 		ospi->fmode = CR_FMODE_INDR;
627 
628 	ret = stm32_ospi_send(desc->mem->spi, &op);
629 	mutex_unlock(&ospi->lock);
630 
631 	pm_runtime_mark_last_busy(ospi->dev);
632 	pm_runtime_put_autosuspend(ospi->dev);
633 
634 	return ret ?: len;
635 }
636 
637 static int stm32_ospi_transfer_one_message(struct spi_controller *ctrl,
638 					   struct spi_message *msg)
639 {
640 	struct stm32_ospi *ospi = spi_controller_get_devdata(ctrl);
641 	struct spi_transfer *transfer;
642 	struct spi_device *spi = msg->spi;
643 	struct spi_mem_op op;
644 	struct gpio_desc *cs_gpiod = spi->cs_gpiod[ffs(spi->cs_index_mask) - 1];
645 	int ret = 0;
646 
647 	if (!cs_gpiod)
648 		return -EOPNOTSUPP;
649 
650 	ret = pm_runtime_resume_and_get(ospi->dev);
651 	if (ret < 0)
652 		return ret;
653 
654 	mutex_lock(&ospi->lock);
655 
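	/* The chip select is GPIO-driven here; assert it for the whole message */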
656 	gpiod_set_value_cansleep(cs_gpiod, true);
657 
658 	list_for_each_entry(transfer, &msg->transfers, transfer_list) {
659 		u8 dummy_bytes = 0;
660 
661 		memset(&op, 0, sizeof(op));
662 
663 		dev_dbg(ospi->dev, "tx_buf:%p tx_nbits:%d rx_buf:%p rx_nbits:%d len:%d dummy_data:%d\n",
664 			transfer->tx_buf, transfer->tx_nbits,
665 			transfer->rx_buf, transfer->rx_nbits,
666 			transfer->len, transfer->dummy_data);
667 
668 		/*
669 		 * The OSPI hardware supports dummy byte transfers.
670 		 * If the current transfer carries dummy bytes, merge it with
671 		 * the next transfer to satisfy this OSPI block constraint.
672 		 */
673 		if (transfer->dummy_data) {
674 			op.dummy.buswidth = transfer->tx_nbits;
675 			op.dummy.nbytes = transfer->len;
676 			dummy_bytes = transfer->len;
677 
678 			/* If this happens, the message is not correctly built */
679 			if (list_is_last(&transfer->transfer_list, &msg->transfers)) {
680 				ret = -EINVAL;
681 				goto end_of_transfer;
682 			}
683 
684 			transfer = list_next_entry(transfer, transfer_list);
685 		}
686 
687 		op.data.nbytes = transfer->len;
688 
689 		if (transfer->rx_buf) {
690 			ospi->fmode = CR_FMODE_INDR;
691 			op.data.buswidth = transfer->rx_nbits;
692 			op.data.dir = SPI_MEM_DATA_IN;
693 			op.data.buf.in = transfer->rx_buf;
694 		} else {
695 			ospi->fmode = CR_FMODE_INDW;
696 			op.data.buswidth = transfer->tx_nbits;
697 			op.data.dir = SPI_MEM_DATA_OUT;
698 			op.data.buf.out = transfer->tx_buf;
699 		}
700 
701 		ret = stm32_ospi_send(spi, &op);
702 		if (ret)
703 			goto end_of_transfer;
704 
705 		msg->actual_length += transfer->len + dummy_bytes;
706 	}
707 
708 end_of_transfer:
709 	gpiod_set_value_cansleep(cs_gpiod, false);
710 
711 	mutex_unlock(&ospi->lock);
712 
713 	msg->status = ret;
714 	spi_finalize_current_message(ctrl);
715 
716 	pm_runtime_mark_last_busy(ospi->dev);
717 	pm_runtime_put_autosuspend(ospi->dev);
718 
719 	return ret;
720 }
721 
722 static int stm32_ospi_setup(struct spi_device *spi)
723 {
724 	struct spi_controller *ctrl = spi->controller;
725 	struct stm32_ospi *ospi = spi_controller_get_devdata(ctrl);
726 	void __iomem *regs_base = ospi->regs_base;
727 	int ret;
728 	u8 cs = spi->chip_select[ffs(spi->cs_index_mask) - 1];
729 
730 	if (ctrl->busy)
731 		return -EBUSY;
732 
733 	if (!spi->max_speed_hz)
734 		return -EINVAL;
735 
736 	ret = pm_runtime_resume_and_get(ospi->dev);
737 	if (ret < 0)
738 		return ret;
739 
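	/*
	 * The kernel clock is divided by (prescaler + 1); round up so that
	 * the resulting SPI clock never exceeds max_speed_hz.
	 */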
740 	ospi->flash_presc[cs] = DIV_ROUND_UP(ospi->clk_rate, spi->max_speed_hz) - 1;
741 
742 	mutex_lock(&ospi->lock);
743 
744 	ospi->cr_reg = CR_APMS | 3 << CR_FTHRES_SHIFT | CR_EN;
745 	writel_relaxed(ospi->cr_reg, regs_base + OSPI_CR);
746 
747 	/* set dcr fsize to max address */
748 	ospi->dcr_reg = DCR1_DEVSIZE_MASK | DCR1_DLYBYP;
749 	writel_relaxed(ospi->dcr_reg, regs_base + OSPI_DCR1);
750 
751 	mutex_unlock(&ospi->lock);
752 
753 	pm_runtime_mark_last_busy(ospi->dev);
754 	pm_runtime_put_autosuspend(ospi->dev);
755 
756 	return 0;
757 }
758 
759 /*
760  * No special host constraint, so use the default spi_mem_default_supports_op
761  * to check supported operations.
762  */
763 static const struct spi_controller_mem_ops stm32_ospi_mem_ops = {
764 	.exec_op	= stm32_ospi_exec_op,
765 	.dirmap_create	= stm32_ospi_dirmap_create,
766 	.dirmap_read	= stm32_ospi_dirmap_read,
767 	.poll_status	= stm32_ospi_poll_status,
768 };
769 
770 static int stm32_ospi_get_resources(struct platform_device *pdev)
771 {
772 	struct device *dev = &pdev->dev;
773 	struct stm32_ospi *ospi = platform_get_drvdata(pdev);
774 	struct resource *res;
775 	struct reserved_mem *rmem = NULL;
776 	struct device_node *node;
777 	int ret;
778 
779 	ospi->regs_base = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
780 	if (IS_ERR(ospi->regs_base))
781 		return PTR_ERR(ospi->regs_base);
782 
783 	ospi->regs_phys_base = res->start;
784 
785 	ospi->clk = devm_clk_get(dev, NULL);
786 	if (IS_ERR(ospi->clk))
787 		return dev_err_probe(dev, PTR_ERR(ospi->clk),
788 				     "Can't get clock\n");
789 
790 	ospi->clk_rate = clk_get_rate(ospi->clk);
791 	if (!ospi->clk_rate) {
792 		dev_err(dev, "Invalid clock rate\n");
793 		return -EINVAL;
794 	}
795 
796 	ospi->irq = platform_get_irq(pdev, 0);
797 	if (ospi->irq < 0)
798 		return ospi->irq;
799 
800 	ret = devm_request_irq(dev, ospi->irq, stm32_ospi_irq, 0,
801 			       dev_name(dev), ospi);
802 	if (ret) {
803 		dev_err(dev, "Failed to request irq\n");
804 		return ret;
805 	}
806 
807 	ospi->rstc = devm_reset_control_array_get_optional_exclusive(dev);
808 	if (IS_ERR(ospi->rstc))
809 		return dev_err_probe(dev, PTR_ERR(ospi->rstc),
810 				     "Can't get reset\n");
811 
812 	ospi->dma_chrx = dma_request_chan(dev, "rx");
813 	if (IS_ERR(ospi->dma_chrx)) {
814 		ret = PTR_ERR(ospi->dma_chrx);
815 		ospi->dma_chrx = NULL;
816 		if (ret == -EPROBE_DEFER)
817 			goto err_dma;
818 	}
819 
820 	ospi->dma_chtx = dma_request_chan(dev, "tx");
821 	if (IS_ERR(ospi->dma_chtx)) {
822 		ret = PTR_ERR(ospi->dma_chtx);
823 		ospi->dma_chtx = NULL;
824 		if (ret == -EPROBE_DEFER)
825 			goto err_dma;
826 	}
827 
828 	node = of_parse_phandle(dev->of_node, "memory-region", 0);
829 	if (node)
830 		rmem = of_reserved_mem_lookup(node);
831 	of_node_put(node);
832 
833 	if (rmem) {
834 		ospi->mm_size = rmem->size;
835 		ospi->mm_base = devm_ioremap(dev, rmem->base, rmem->size);
836 		if (!ospi->mm_base) {
837 			dev_err(dev, "unable to map memory region: %pa+%pa\n",
838 				&rmem->base, &rmem->size);
839 			ret = -ENOMEM;
840 			goto err_dma;
841 		}
842 
843 		if (ospi->mm_size > STM32_OSPI_MAX_MMAP_SZ) {
844 			dev_err(dev, "Memory map size outside bounds\n");
845 			ret = -EINVAL;
846 			goto err_dma;
847 		}
848 	} else {
849 		dev_info(dev, "No memory-map region found\n");
850 	}
851 
852 	init_completion(&ospi->data_completion);
853 	init_completion(&ospi->match_completion);
854 
855 	return 0;
856 
857 err_dma:
858 	dev_info(dev, "Can't get all resources (%d)\n", ret);
859 
860 	if (ospi->dma_chtx)
861 		dma_release_channel(ospi->dma_chtx);
862 	if (ospi->dma_chrx)
863 		dma_release_channel(ospi->dma_chrx);
864 
865 	return ret;
866 };
867 
868 static int stm32_ospi_probe(struct platform_device *pdev)
869 {
870 	struct device *dev = &pdev->dev;
871 	struct spi_controller *ctrl;
872 	struct stm32_ospi *ospi;
873 	struct dma_slave_config dma_cfg;
874 	struct device_node *child;
875 	int ret;
876 	u8 spi_flash_count = 0;
877 
878 	/*
879 	 * Flash subnodes sanity check:
880 	 * - 1 or 2 spi-nand/spi-nor flashes	=> supported
881 	 * - any other flash node configuration	=> not supported
882 	 */
883 	for_each_available_child_of_node(dev->of_node, child) {
884 		if (of_device_is_compatible(child, "jedec,spi-nor") ||
885 		    of_device_is_compatible(child, "spi-nand"))
886 			spi_flash_count++;
887 	}
888 
889 	if (spi_flash_count == 0 || spi_flash_count > 2) {
890 		dev_err(dev, "Incorrect DT flash node\n");
891 		return -ENODEV;
892 	}
893 
894 	ctrl = devm_spi_alloc_host(dev, sizeof(*ospi));
895 	if (!ctrl)
896 		return -ENOMEM;
897 
898 	ospi = spi_controller_get_devdata(ctrl);
899 	ospi->ctrl = ctrl;
900 
901 	ospi->dev = &pdev->dev;
902 	platform_set_drvdata(pdev, ospi);
903 
904 	ret = stm32_ospi_get_resources(pdev);
905 	if (ret)
906 		return ret;
907 
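	/* Both DMA directions use single-byte accesses to the data register (OSPI_DR) */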
908 	memset(&dma_cfg, 0, sizeof(dma_cfg));
909 	dma_cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
910 	dma_cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
911 	dma_cfg.src_addr = ospi->regs_phys_base + OSPI_DR;
912 	dma_cfg.dst_addr = ospi->regs_phys_base + OSPI_DR;
913 	dma_cfg.src_maxburst = 4;
914 	dma_cfg.dst_maxburst = 4;
915 	stm32_ospi_dma_setup(ospi, &dma_cfg);
916 
917 	mutex_init(&ospi->lock);
918 
919 	ctrl->mode_bits = SPI_RX_DUAL | SPI_RX_QUAD |
920 			  SPI_TX_DUAL | SPI_TX_QUAD |
921 			  SPI_TX_OCTAL | SPI_RX_OCTAL;
922 	ctrl->flags = SPI_CONTROLLER_HALF_DUPLEX;
923 	ctrl->setup = stm32_ospi_setup;
924 	ctrl->bus_num = -1;
925 	ctrl->mem_ops = &stm32_ospi_mem_ops;
926 	ctrl->use_gpio_descriptors = true;
927 	ctrl->transfer_one_message = stm32_ospi_transfer_one_message;
928 	ctrl->num_chipselect = STM32_OSPI_MAX_NORCHIP;
929 	ctrl->dev.of_node = dev->of_node;
930 
931 	pm_runtime_enable(ospi->dev);
932 	pm_runtime_set_autosuspend_delay(ospi->dev, STM32_AUTOSUSPEND_DELAY);
933 	pm_runtime_use_autosuspend(ospi->dev);
934 
935 	ret = pm_runtime_resume_and_get(ospi->dev);
936 	if (ret < 0)
937 		goto err_pm_enable;
938 
939 	if (ospi->rstc) {
940 		reset_control_assert(ospi->rstc);
941 		udelay(2);
942 		reset_control_deassert(ospi->rstc);
943 	}
944 
945 	ret = spi_register_controller(ctrl);
946 	if (ret) {
947 		/* Disable ospi */
948 		writel_relaxed(0, ospi->regs_base + OSPI_CR);
949 		goto err_pm_resume;
950 	}
951 
952 	pm_runtime_mark_last_busy(ospi->dev);
953 	pm_runtime_put_autosuspend(ospi->dev);
954 
955 	return 0;
956 
957 err_pm_resume:
958 	pm_runtime_put_sync_suspend(ospi->dev);
959 
960 err_pm_enable:
961 	pm_runtime_force_suspend(ospi->dev);
962 	mutex_destroy(&ospi->lock);
963 
964 	return ret;
965 }
966 
967 static void stm32_ospi_remove(struct platform_device *pdev)
968 {
969 	struct stm32_ospi *ospi = platform_get_drvdata(pdev);
970 	int ret;
971 
972 	ret = pm_runtime_resume_and_get(ospi->dev);
973 	if (ret < 0)
974 		return;
975 
976 	spi_unregister_controller(ospi->ctrl);
977 	/* Disable ospi */
978 	writel_relaxed(0, ospi->regs_base + OSPI_CR);
979 	mutex_destroy(&ospi->lock);
980 
981 	if (ospi->dma_chtx)
982 		dma_release_channel(ospi->dma_chtx);
983 	if (ospi->dma_chrx)
984 		dma_release_channel(ospi->dma_chrx);
985 
986 	pm_runtime_put_sync_suspend(ospi->dev);
987 	pm_runtime_force_suspend(ospi->dev);
988 }
989 
990 static int __maybe_unused stm32_ospi_suspend(struct device *dev)
991 {
992 	struct stm32_ospi *ospi = dev_get_drvdata(dev);
993 
994 	pinctrl_pm_select_sleep_state(dev);
995 
996 	return pm_runtime_force_suspend(ospi->dev);
997 }
998 
999 static int __maybe_unused stm32_ospi_resume(struct device *dev)
1000 {
1001 	struct stm32_ospi *ospi = dev_get_drvdata(dev);
1002 	void __iomem *regs_base = ospi->regs_base;
1003 	int ret;
1004 
1005 	ret = pm_runtime_force_resume(ospi->dev);
1006 	if (ret < 0)
1007 		return ret;
1008 
1009 	pinctrl_pm_select_default_state(dev);
1010 
1011 	ret = pm_runtime_resume_and_get(ospi->dev);
1012 	if (ret < 0)
1013 		return ret;
1014 
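	/* Restore the controller configuration cached at setup time */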
1015 	writel_relaxed(ospi->cr_reg, regs_base + OSPI_CR);
1016 	writel_relaxed(ospi->dcr_reg, regs_base + OSPI_DCR1);
1017 	pm_runtime_mark_last_busy(ospi->dev);
1018 	pm_runtime_put_autosuspend(ospi->dev);
1019 
1020 	return 0;
1021 }
1022 
1023 static int __maybe_unused stm32_ospi_runtime_suspend(struct device *dev)
1024 {
1025 	struct stm32_ospi *ospi = dev_get_drvdata(dev);
1026 
1027 	clk_disable_unprepare(ospi->clk);
1028 
1029 	return 0;
1030 }
1031 
1032 static int __maybe_unused stm32_ospi_runtime_resume(struct device *dev)
1033 {
1034 	struct stm32_ospi *ospi = dev_get_drvdata(dev);
1035 
1036 	return clk_prepare_enable(ospi->clk);
1037 }
1038 
1039 static const struct dev_pm_ops stm32_ospi_pm_ops = {
1040 	SET_SYSTEM_SLEEP_PM_OPS(stm32_ospi_suspend, stm32_ospi_resume)
1041 	SET_RUNTIME_PM_OPS(stm32_ospi_runtime_suspend,
1042 			   stm32_ospi_runtime_resume, NULL)
1043 };
1044 
1045 static const struct of_device_id stm32_ospi_of_match[] = {
1046 	{ .compatible = "st,stm32mp25-ospi" },
1047 	{},
1048 };
1049 MODULE_DEVICE_TABLE(of, stm32_ospi_of_match);
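
/*
 * Minimal device-tree sketch, for illustration only: node names, addresses
 * and phandles below are placeholders, not taken from a real board. It only
 * lists properties this driver actually consumes (reg, interrupts, clocks,
 * resets, "tx"/"rx" DMA channels, an optional memory-region for the
 * memory-mapped window, and one or two flash child nodes).
 *
 *	spi@... {
 *		compatible = "st,stm32mp25-ospi";
 *		reg = <...>;
 *		interrupts = <...>;
 *		clocks = <...>;
 *		resets = <...>;
 *		dmas = <...>, <...>;
 *		dma-names = "tx", "rx";
 *		memory-region = <&ospi_mm_region>;
 *		#address-cells = <1>;
 *		#size-cells = <0>;
 *
 *		flash@0 {
 *			compatible = "jedec,spi-nor";
 *			reg = <0>;
 *			spi-max-frequency = <...>;
 *		};
 *	};
 */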
1050 
1051 static struct platform_driver stm32_ospi_driver = {
1052 	.probe	= stm32_ospi_probe,
1053 	.remove	= stm32_ospi_remove,
1054 	.driver	= {
1055 		.name = "stm32-ospi",
1056 		.pm = &stm32_ospi_pm_ops,
1057 		.of_match_table = stm32_ospi_of_match,
1058 	},
1059 };
1060 module_platform_driver(stm32_ospi_driver);
1061 
1062 MODULE_DESCRIPTION("STMicroelectronics STM32 OCTO SPI driver");
1063 MODULE_LICENSE("GPL");
1064