xref: /linux/drivers/spi/spi-stm32-ospi.c (revision a5210135489ae7bc1ef1cb4a8157361dd7b468cd)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Copyright (C) STMicroelectronics 2025 - All Rights Reserved
4  */
5 
6 #include <linux/bitfield.h>
7 #include <linux/clk.h>
8 #include <linux/delay.h>
9 #include <linux/dma-mapping.h>
10 #include <linux/dmaengine.h>
11 #include <linux/err.h>
12 #include <linux/errno.h>
13 #include <linux/gpio/consumer.h>
14 #include <linux/interrupt.h>
15 #include <linux/io.h>
16 #include <linux/iopoll.h>
17 #include <linux/mfd/syscon.h>
18 #include <linux/module.h>
19 #include <linux/mutex.h>
20 #include <linux/of.h>
21 #include <linux/of_address.h>
22 #include <linux/of_device.h>
23 #include <linux/of_reserved_mem.h>
24 #include <linux/pinctrl/consumer.h>
25 #include <linux/platform_device.h>
26 #include <linux/pm_runtime.h>
27 #include <linux/reset.h>
28 #include <linux/sizes.h>
29 #include <linux/spi/spi-mem.h>
30 #include <linux/types.h>
31 
32 #define OSPI_CR			0x00
33 #define CR_EN			BIT(0)
34 #define CR_ABORT		BIT(1)
35 #define CR_DMAEN		BIT(2)
36 #define CR_FTHRES_SHIFT		8
37 #define CR_SMIE			BIT(19)
38 #define CR_APMS			BIT(22)
39 #define CR_CSSEL		BIT(24)
40 #define CR_FMODE_MASK		GENMASK(29, 28)
41 #define CR_FMODE_INDW		(0U)
42 #define CR_FMODE_INDR		(1U)
43 #define CR_FMODE_APM		(2U)
44 #define CR_FMODE_MM		(3U)
45 
46 #define OSPI_DCR1		0x08
47 #define DCR1_DLYBYP		BIT(3)
48 #define DCR1_DEVSIZE_MASK	GENMASK(20, 16)
49 #define DCR1_MTYP_MASK		GENMASK(26, 24)
50 #define DCR1_MTYP_MX_MODE	1
51 #define DCR1_MTYP_HP_MEMMODE	4
52 
53 #define OSPI_DCR2		0x0c
54 #define DCR2_PRESC_MASK		GENMASK(7, 0)
55 
56 #define OSPI_SR			0x20
57 #define SR_TEF			BIT(0)
58 #define SR_TCF			BIT(1)
59 #define SR_FTF			BIT(2)
60 #define SR_SMF			BIT(3)
61 #define SR_BUSY			BIT(5)
62 
63 #define OSPI_FCR		0x24
64 #define FCR_CTEF		BIT(0)
65 #define FCR_CTCF		BIT(1)
66 #define FCR_CSMF		BIT(3)
67 
68 #define OSPI_DLR		0x40
69 #define OSPI_AR			0x48
70 #define OSPI_DR			0x50
71 #define OSPI_PSMKR		0x80
72 #define OSPI_PSMAR		0x88
73 
74 #define OSPI_CCR		0x100
75 #define CCR_IMODE_MASK		GENMASK(2, 0)
76 #define CCR_IDTR		BIT(3)
77 #define CCR_ISIZE_MASK		GENMASK(5, 4)
78 #define CCR_ADMODE_MASK		GENMASK(10, 8)
79 #define CCR_ADMODE_8LINES	4
80 #define CCR_ADDTR		BIT(11)
81 #define CCR_ADSIZE_MASK		GENMASK(13, 12)
82 #define CCR_ADSIZE_32BITS	3
83 #define CCR_DMODE_MASK		GENMASK(26, 24)
84 #define CCR_DMODE_8LINES	4
85 #define CCR_DQSE		BIT(29)
86 #define CCR_DDTR		BIT(27)
87 #define CCR_BUSWIDTH_0		0x0
88 #define CCR_BUSWIDTH_1		0x1
89 #define CCR_BUSWIDTH_2		0x2
90 #define CCR_BUSWIDTH_4		0x3
91 #define CCR_BUSWIDTH_8		0x4
92 
93 #define OSPI_TCR		0x108
94 #define TCR_DCYC_MASK		GENMASK(4, 0)
95 #define TCR_DHQC		BIT(28)
96 #define TCR_SSHIFT		BIT(30)
97 
98 #define OSPI_IR			0x110
99 
100 #define STM32_OSPI_MAX_MMAP_SZ	SZ_256M
101 #define STM32_OSPI_MAX_NORCHIP	2
102 
103 #define STM32_FIFO_TIMEOUT_US		30000
104 #define STM32_ABT_TIMEOUT_US		100000
105 #define STM32_COMP_TIMEOUT_MS		5000
106 #define STM32_BUSY_TIMEOUT_US		100000
107 #define STM32_WAIT_CMD_TIMEOUT_US	5000
108 
109 #define STM32_AUTOSUSPEND_DELAY -1
110 
struct stm32_ospi {
	struct device *dev;			/* parent platform device */
	struct spi_controller *ctrl;		/* SPI controller handle */
	struct clk *clk;			/* OSPI kernel clock */
	struct reset_control *rstc;		/* released/acquired across suspend */

	/* completed by the IRQ handler on an automatic-poll status match */
	struct completion match_completion;

	struct dma_chan *dma_chtx;		/* optional TX DMA channel (NULL = PIO) */
	struct dma_chan *dma_chrx;		/* optional RX DMA channel (NULL = PIO) */
	struct completion dma_completion;	/* signalled by the DMA callback */

	void __iomem *regs_base;		/* OSPI register bank */
	void __iomem *mm_base;			/* memory-map window (may be NULL) */
	phys_addr_t regs_phys_base;		/* physical base, used for the DMA FIFO address */
	resource_size_t mm_size;		/* size of the memory-map window */
	u32 clk_rate;				/* kernel clock rate in Hz */
	u32 fmode;				/* current functional mode (CR_FMODE_*) */
	u32 cr_reg;				/* CR value restored on system resume */
	u32 dcr_reg;				/* DCR1 value restored on system resume */
	u32 flash_presc[STM32_OSPI_MAX_NORCHIP]; /* per-chip-select prescaler */
	int irq;
	unsigned long status_timeout;		/* automatic-poll timeout, in ms */

	/*
	 * To protect device configuration, could be different between
	 * 2 flash access
	 */
	struct mutex lock;
};
141 
/* Pop one FIFO word of @len bytes (4, 2 or 1) from @addr into @val */
static void stm32_ospi_read_fifo(void *val, void __iomem *addr, u8 len)
{
	if (len == sizeof(u32))
		*(u32 *)val = readl_relaxed(addr);
	else if (len == sizeof(u16))
		*(u16 *)val = readw_relaxed(addr);
	else if (len == sizeof(u8))
		*(u8 *)val = readb_relaxed(addr);
}
155 
/* Push one FIFO word of @len bytes (4, 2 or 1) from @val to @addr */
static void stm32_ospi_write_fifo(void *val, void __iomem *addr, u8 len)
{
	if (len == sizeof(u32))
		writel_relaxed(*(u32 *)val, addr);
	else if (len == sizeof(u16))
		writew_relaxed(*(u16 *)val, addr);
	else if (len == sizeof(u8))
		writeb_relaxed(*(u8 *)val, addr);
}
169 
/*
 * stm32_ospi_abort - stop the on-going transfer
 *
 * Sets CR_ABORT and busy-waits for the hardware to clear it.
 *
 * Return: 0 on success, the poll-timeout errno otherwise.
 */
static int stm32_ospi_abort(struct stm32_ospi *ospi)
{
	void __iomem *regs_base = ospi->regs_base;
	u32 cr;
	int timeout;

	/* request abort of the current operation */
	cr = readl_relaxed(regs_base + OSPI_CR) | CR_ABORT;
	writel_relaxed(cr, regs_base + OSPI_CR);

	/* wait clear of abort bit by hw */
	timeout = readl_relaxed_poll_timeout_atomic(regs_base + OSPI_CR,
						    cr, !(cr & CR_ABORT), 1,
						    STM32_ABT_TIMEOUT_US);

	if (timeout)
		dev_err(ospi->dev, "%s abort timeout:%d\n", __func__, timeout);

	return timeout;
}
189 
/*
 * stm32_ospi_poll - PIO transfer of @len bytes through the controller FIFO
 * @ospi: controller state
 * @buf: data buffer to read into (@read true) or write from (@read false)
 * @len: number of bytes to transfer
 * @read: transfer direction, true for device-to-memory
 *
 * Waits for the FIFO threshold flag before each access and moves data in
 * the widest chunks the remaining length allows (4, then 2, then 1 byte).
 *
 * Return: 0 on success, poll-timeout errno on FIFO timeout.
 */
static int stm32_ospi_poll(struct stm32_ospi *ospi, void *buf, u32 len, bool read)
{
	void __iomem *regs_base = ospi->regs_base;
	void (*fifo)(void *val, void __iomem *addr, u8 len);
	u32 sr;
	int ret;
	u8 step;

	if (read)
		fifo = stm32_ospi_read_fifo;
	else
		fifo = stm32_ospi_write_fifo;

	while (len) {
		/* wait for the FIFO threshold flag before touching OSPI_DR */
		ret = readl_relaxed_poll_timeout_atomic(regs_base + OSPI_SR,
							sr, sr & SR_FTF, 1,
							STM32_FIFO_TIMEOUT_US);
		if (ret) {
			dev_err(ospi->dev, "fifo timeout (len:%d stat:%#x)\n",
				len, sr);
			return ret;
		}

		/* use the widest access the remaining length allows */
		if (len >= sizeof(u32))
			step = sizeof(u32);
		else if (len >= sizeof(u16))
			step = sizeof(u16);
		else
			step = sizeof(u8);

		fifo(buf, regs_base + OSPI_DR, step);
		len -= step;
		buf += step;
	}

	return 0;
}
227 
stm32_ospi_wait_nobusy(struct stm32_ospi * ospi)228 static int stm32_ospi_wait_nobusy(struct stm32_ospi *ospi)
229 {
230 	u32 sr;
231 
232 	return readl_relaxed_poll_timeout_atomic(ospi->regs_base + OSPI_SR,
233 						 sr, !(sr & SR_BUSY), 1,
234 						 STM32_BUSY_TIMEOUT_US);
235 }
236 
/*
 * stm32_ospi_wait_cmd - wait for the current command to complete
 *
 * In automatic-poll mode completion is signalled elsewhere, so only the
 * flags are cleared.  Otherwise wait for transfer-complete (TCF) or
 * transfer-error (TEF), then for the busy bit to drop.
 *
 * Return: 0 on success, -EIO on transfer error, or a timeout errno.
 */
static int stm32_ospi_wait_cmd(struct stm32_ospi *ospi)
{
	void __iomem *regs_base = ospi->regs_base;
	u32 sr;
	int err = 0;

	if (ospi->fmode == CR_FMODE_APM)
		goto out;

	err = readl_relaxed_poll_timeout_atomic(ospi->regs_base + OSPI_SR, sr,
						(sr & (SR_TEF | SR_TCF)), 1,
						STM32_WAIT_CMD_TIMEOUT_US);

	if (sr & SR_TCF)
		/* avoid false timeout */
		err = 0;
	if (sr & SR_TEF)
		/* transfer error reported by the controller */
		err = -EIO;

out:
	/* clear flags */
	writel_relaxed(FCR_CTCF | FCR_CTEF, regs_base + OSPI_FCR);

	if (!err)
		err = stm32_ospi_wait_nobusy(ospi);

	return err;
}
265 
/* DMA completion callback: wake the waiter in stm32_ospi_tx_dma() */
static void stm32_ospi_dma_callback(void *arg)
{
	complete((struct completion *)arg);
}
272 
/*
 * stm32_ospi_irq - interrupt handler
 *
 * Only the status-match event (automatic-poll mode) is handled: the
 * match interrupt is disabled and the waiter in
 * stm32_ospi_wait_poll_status() is woken up.
 */
static irqreturn_t stm32_ospi_irq(int irq, void *dev_id)
{
	struct stm32_ospi *ospi = (struct stm32_ospi *)dev_id;
	void __iomem *regs_base = ospi->regs_base;
	u32 cr, sr;

	cr = readl_relaxed(regs_base + OSPI_CR);
	sr = readl_relaxed(regs_base + OSPI_SR);

	if (sr & SR_SMF) {
		/* disable irq */
		cr &= ~CR_SMIE;
		writel_relaxed(cr, regs_base + OSPI_CR);
		complete(&ospi->match_completion);
	}

	return IRQ_HANDLED;
}
291 
/*
 * stm32_ospi_dma_setup - configure the optional RX/TX DMA channels
 * @ospi: controller state
 * @dma_cfg: slave config template, FIFO address and data width pre-filled
 *           by the caller
 *
 * A channel whose slave configuration fails is released and set to NULL,
 * which makes the driver fall back to PIO for that direction; only a
 * dma_get_slave_caps() failure is propagated to the caller.
 *
 * Return: 0 on success, negative errno on capability-query failure.
 */
static int stm32_ospi_dma_setup(struct stm32_ospi *ospi,
				struct dma_slave_config *dma_cfg)
{
	struct dma_slave_caps caps;
	int ret = 0;

	if (dma_cfg && ospi->dma_chrx) {
		ret = dma_get_slave_caps(ospi->dma_chrx, &caps);
		if (ret)
			return ret;

		/* cap the burst to the engine's maximum, in width units */
		dma_cfg->src_maxburst = caps.max_burst / dma_cfg->src_addr_width;

		if (dmaengine_slave_config(ospi->dma_chrx, dma_cfg)) {
			dev_err(ospi->dev, "dma rx config failed\n");
			dma_release_channel(ospi->dma_chrx);
			ospi->dma_chrx = NULL;
		}
	}

	if (dma_cfg && ospi->dma_chtx) {
		ret = dma_get_slave_caps(ospi->dma_chtx, &caps);
		if (ret)
			return ret;

		dma_cfg->dst_maxburst = caps.max_burst / dma_cfg->dst_addr_width;

		if (dmaengine_slave_config(ospi->dma_chtx, dma_cfg)) {
			dev_err(ospi->dev, "dma tx config failed\n");
			dma_release_channel(ospi->dma_chtx);
			ospi->dma_chtx = NULL;
		}
	}

	init_completion(&ospi->dma_completion);

	return ret;
}
330 
stm32_ospi_tx_mm(struct stm32_ospi * ospi,const struct spi_mem_op * op)331 static int stm32_ospi_tx_mm(struct stm32_ospi *ospi,
332 			    const struct spi_mem_op *op)
333 {
334 	memcpy_fromio(op->data.buf.in, ospi->mm_base + op->addr.val,
335 		      op->data.nbytes);
336 	return 0;
337 }
338 
/*
 * stm32_ospi_tx_dma - transfer the data phase of @op through DMA
 *
 * Maps the operation buffer into a scatterlist, submits a slave
 * transaction on the direction-matching channel, enables CR_DMAEN for
 * the duration of the transfer and waits for the DMA callback (with a
 * timeout scaled by the number of segments).
 *
 * Return: 0 on success, negative errno on mapping/submit/timeout failure.
 * The caller falls back to PIO on failure.
 */
static int stm32_ospi_tx_dma(struct stm32_ospi *ospi,
			     const struct spi_mem_op *op)
{
	struct dma_async_tx_descriptor *desc;
	void __iomem *regs_base = ospi->regs_base;
	enum dma_transfer_direction dma_dir;
	struct dma_chan *dma_ch;
	struct sg_table sgt;
	dma_cookie_t cookie;
	u32 cr, t_out;
	int err;

	if (op->data.dir == SPI_MEM_DATA_IN) {
		dma_dir = DMA_DEV_TO_MEM;
		dma_ch = ospi->dma_chrx;
	} else {
		dma_dir = DMA_MEM_TO_DEV;
		dma_ch = ospi->dma_chtx;
	}

	/*
	 * Spi_map_buf return -EINVAL if the buffer is not DMA-able
	 * (DMA-able: in vmalloc | kmap | virt_addr_valid)
	 */
	err = spi_controller_dma_map_mem_op_data(ospi->ctrl, op, &sgt);
	if (err)
		return err;

	desc = dmaengine_prep_slave_sg(dma_ch, sgt.sgl, sgt.nents,
				       dma_dir, DMA_PREP_INTERRUPT);
	if (!desc) {
		err = -ENOMEM;
		goto out_unmap;
	}

	cr = readl_relaxed(regs_base + OSPI_CR);

	reinit_completion(&ospi->dma_completion);
	desc->callback = stm32_ospi_dma_callback;
	desc->callback_param = &ospi->dma_completion;
	cookie = dmaengine_submit(desc);
	err = dma_submit_error(cookie);
	if (err)
		goto out;

	dma_async_issue_pending(dma_ch);

	/* let the controller drive the DMA request line */
	writel_relaxed(cr | CR_DMAEN, regs_base + OSPI_CR);

	/* one timeout budget per scatterlist segment */
	t_out = sgt.nents * STM32_COMP_TIMEOUT_MS;
	if (!wait_for_completion_timeout(&ospi->dma_completion,
					 msecs_to_jiffies(t_out)))
		err = -ETIMEDOUT;

	if (err)
		dmaengine_terminate_all(dma_ch);

out:
	writel_relaxed(cr & ~CR_DMAEN, regs_base + OSPI_CR);
out_unmap:
	spi_controller_dma_unmap_mem_op_data(ospi->ctrl, op, &sgt);

	return err;
}
403 
stm32_ospi_xfer(struct stm32_ospi * ospi,const struct spi_mem_op * op)404 static int stm32_ospi_xfer(struct stm32_ospi *ospi, const struct spi_mem_op *op)
405 {
406 	u8 *buf;
407 
408 	if (!op->data.nbytes)
409 		return 0;
410 
411 	if (ospi->fmode == CR_FMODE_MM)
412 		return stm32_ospi_tx_mm(ospi, op);
413 	else if (((op->data.dir == SPI_MEM_DATA_IN && ospi->dma_chrx) ||
414 		 (op->data.dir == SPI_MEM_DATA_OUT && ospi->dma_chtx)) &&
415 		  op->data.nbytes > 8)
416 		if (!stm32_ospi_tx_dma(ospi, op))
417 			return 0;
418 
419 	if (op->data.dir == SPI_MEM_DATA_IN)
420 		buf = op->data.buf.in;
421 	else
422 		buf = (void *)op->data.buf.out;
423 
424 	return stm32_ospi_poll(ospi, buf, op->data.nbytes,
425 			       op->data.dir == SPI_MEM_DATA_IN);
426 }
427 
/*
 * stm32_ospi_wait_poll_status - wait for an automatic-poll status match
 *
 * Enables the status-match interrupt and sleeps until the IRQ handler
 * completes match_completion, bounded by ospi->status_timeout (ms).
 * A late match observed directly in SR after the wait expires is not
 * treated as a timeout.
 *
 * Return: 0 on match, -ETIMEDOUT otherwise.
 */
static int stm32_ospi_wait_poll_status(struct stm32_ospi *ospi,
				       const struct spi_mem_op *op)
{
	void __iomem *regs_base = ospi->regs_base;
	u32 cr;

	reinit_completion(&ospi->match_completion);
	cr = readl_relaxed(regs_base + OSPI_CR);
	writel_relaxed(cr | CR_SMIE, regs_base + OSPI_CR);

	if (!wait_for_completion_timeout(&ospi->match_completion,
					 msecs_to_jiffies(ospi->status_timeout))) {
		u32 sr = readl_relaxed(regs_base + OSPI_SR);

		/* Avoid false timeout */
		if (!(sr & SR_SMF))
			return -ETIMEDOUT;
	}

	/* clear the status-match flag */
	writel_relaxed(FCR_CSMF, regs_base + OSPI_FCR);

	return 0;
}
451 
/*
 * Translate a spi-mem buswidth into a CCR_BUSWIDTH_* field value.
 * Widths 1 and 2 already match their encoding and pass through unchanged.
 */
static int stm32_ospi_get_mode(u8 buswidth)
{
	if (buswidth == 8)
		return CCR_BUSWIDTH_8;

	if (buswidth == 4)
		return CCR_BUSWIDTH_4;

	return buswidth;
}
463 
stm32_ospi_send(struct spi_device * spi,const struct spi_mem_op * op)464 static int stm32_ospi_send(struct spi_device *spi, const struct spi_mem_op *op)
465 {
466 	struct stm32_ospi *ospi = spi_controller_get_devdata(spi->controller);
467 	void __iomem *regs_base = ospi->regs_base;
468 	u32 ccr, cr, dcr2, tcr;
469 	int timeout, err = 0, err_poll_status = 0;
470 	u8 cs = spi->chip_select[ffs(spi->cs_index_mask) - 1];
471 
472 	cr = readl_relaxed(ospi->regs_base + OSPI_CR);
473 	cr &= ~CR_CSSEL;
474 	cr |= FIELD_PREP(CR_CSSEL, cs);
475 	cr &= ~CR_FMODE_MASK;
476 	cr |= FIELD_PREP(CR_FMODE_MASK, ospi->fmode);
477 	writel_relaxed(cr, regs_base + OSPI_CR);
478 
479 	if (op->data.nbytes)
480 		writel_relaxed(op->data.nbytes - 1, regs_base + OSPI_DLR);
481 
482 	/* set prescaler */
483 	dcr2 = readl_relaxed(regs_base + OSPI_DCR2);
484 	dcr2 |= FIELD_PREP(DCR2_PRESC_MASK, ospi->flash_presc[cs]);
485 	writel_relaxed(dcr2, regs_base + OSPI_DCR2);
486 
487 	ccr = FIELD_PREP(CCR_IMODE_MASK, stm32_ospi_get_mode(op->cmd.buswidth));
488 
489 	if (op->addr.nbytes) {
490 		ccr |= FIELD_PREP(CCR_ADMODE_MASK,
491 				  stm32_ospi_get_mode(op->addr.buswidth));
492 		ccr |= FIELD_PREP(CCR_ADSIZE_MASK, op->addr.nbytes - 1);
493 	}
494 
495 	tcr = TCR_SSHIFT;
496 	if (op->dummy.buswidth && op->dummy.nbytes) {
497 		tcr |= FIELD_PREP(TCR_DCYC_MASK,
498 				  op->dummy.nbytes * 8 / op->dummy.buswidth);
499 	}
500 	writel_relaxed(tcr, regs_base + OSPI_TCR);
501 
502 	if (op->data.nbytes) {
503 		ccr |= FIELD_PREP(CCR_DMODE_MASK,
504 				  stm32_ospi_get_mode(op->data.buswidth));
505 	}
506 
507 	writel_relaxed(ccr, regs_base + OSPI_CCR);
508 
509 	/* set instruction, must be set after ccr register update */
510 	writel_relaxed(op->cmd.opcode, regs_base + OSPI_IR);
511 
512 	if (op->addr.nbytes && ospi->fmode != CR_FMODE_MM)
513 		writel_relaxed(op->addr.val, regs_base + OSPI_AR);
514 
515 	if (ospi->fmode == CR_FMODE_APM)
516 		err_poll_status = stm32_ospi_wait_poll_status(ospi, op);
517 
518 	err = stm32_ospi_xfer(ospi, op);
519 
520 	/*
521 	 * Abort in:
522 	 * -error case
523 	 * -read memory map: prefetching must be stopped if we read the last
524 	 *  byte of device (device size - fifo size). like device size is not
525 	 *  knows, the prefetching is always stop.
526 	 */
527 	if (err || err_poll_status || ospi->fmode == CR_FMODE_MM)
528 		goto abort;
529 
530 	/* Wait end of tx in indirect mode */
531 	err = stm32_ospi_wait_cmd(ospi);
532 	if (err)
533 		goto abort;
534 
535 	return 0;
536 
537 abort:
538 	timeout = stm32_ospi_abort(ospi);
539 	writel_relaxed(FCR_CTCF | FCR_CSMF, regs_base + OSPI_FCR);
540 
541 	if (err || err_poll_status || timeout)
542 		dev_err(ospi->dev, "%s err:%d err_poll_status:%d abort timeout:%d\n",
543 			__func__, err, err_poll_status, timeout);
544 
545 	return err;
546 }
547 
/*
 * stm32_ospi_poll_status - spi-mem poll_status handler
 *
 * Offloads status-register polling to the controller's automatic-poll
 * mode: the mask/match registers are programmed and stm32_ospi_send()
 * runs @op in CR_FMODE_APM, waiting for the hardware match interrupt.
 * @initial_delay_us and @polling_rate_us are unused; the hardware polls
 * on its own schedule.
 *
 * Return: 0 on match, negative errno otherwise.
 */
static int stm32_ospi_poll_status(struct spi_mem *mem,
				  const struct spi_mem_op *op,
				  u16 mask, u16 match,
				  unsigned long initial_delay_us,
				  unsigned long polling_rate_us,
				  unsigned long timeout_ms)
{
	struct stm32_ospi *ospi = spi_controller_get_devdata(mem->spi->controller);
	void __iomem *regs_base = ospi->regs_base;
	int ret;

	ret = pm_runtime_resume_and_get(ospi->dev);
	if (ret < 0)
		return ret;

	mutex_lock(&ospi->lock);

	/* program the hardware mask/match pair used by automatic-poll mode */
	writel_relaxed(mask, regs_base + OSPI_PSMKR);
	writel_relaxed(match, regs_base + OSPI_PSMAR);
	ospi->fmode = CR_FMODE_APM;
	ospi->status_timeout = timeout_ms;

	ret = stm32_ospi_send(mem->spi, op);
	mutex_unlock(&ospi->lock);

	pm_runtime_put_autosuspend(ospi->dev);

	return ret;
}
577 
stm32_ospi_exec_op(struct spi_mem * mem,const struct spi_mem_op * op)578 static int stm32_ospi_exec_op(struct spi_mem *mem, const struct spi_mem_op *op)
579 {
580 	struct stm32_ospi *ospi = spi_controller_get_devdata(mem->spi->controller);
581 	int ret;
582 
583 	ret = pm_runtime_resume_and_get(ospi->dev);
584 	if (ret < 0)
585 		return ret;
586 
587 	mutex_lock(&ospi->lock);
588 	if (op->data.dir == SPI_MEM_DATA_IN && op->data.nbytes)
589 		ospi->fmode = CR_FMODE_INDR;
590 	else
591 		ospi->fmode = CR_FMODE_INDW;
592 
593 	ret = stm32_ospi_send(mem->spi, op);
594 	mutex_unlock(&ospi->lock);
595 
596 	pm_runtime_put_autosuspend(ospi->dev);
597 
598 	return ret;
599 }
600 
stm32_ospi_dirmap_create(struct spi_mem_dirmap_desc * desc)601 static int stm32_ospi_dirmap_create(struct spi_mem_dirmap_desc *desc)
602 {
603 	struct stm32_ospi *ospi = spi_controller_get_devdata(desc->mem->spi->controller);
604 
605 	if (desc->info.op_tmpl.data.dir == SPI_MEM_DATA_OUT)
606 		return -EOPNOTSUPP;
607 
608 	/* Should never happen, as mm_base == null is an error probe exit condition */
609 	if (!ospi->mm_base && desc->info.op_tmpl.data.dir == SPI_MEM_DATA_IN)
610 		return -EOPNOTSUPP;
611 
612 	if (!ospi->mm_size)
613 		return -EOPNOTSUPP;
614 
615 	return 0;
616 }
617 
/*
 * stm32_ospi_dirmap_read - spi-mem dirmap_read handler
 *
 * Reads @len bytes at @offs through the memory-map window when the
 * whole range fits inside it, otherwise falls back to indirect read.
 *
 * Return: @len on success, negative errno otherwise.
 */
static ssize_t stm32_ospi_dirmap_read(struct spi_mem_dirmap_desc *desc,
				      u64 offs, size_t len, void *buf)
{
	struct stm32_ospi *ospi = spi_controller_get_devdata(desc->mem->spi->controller);
	struct spi_mem_op op;
	u32 addr_max;
	int ret;

	ret = pm_runtime_resume_and_get(ospi->dev);
	if (ret < 0)
		return ret;

	mutex_lock(&ospi->lock);
	/*
	 * Make a local copy of desc op_tmpl and complete dirmap rdesc
	 * spi_mem_op template with offs, len and *buf in  order to get
	 * all needed transfer information into struct spi_mem_op
	 */
	memcpy(&op, &desc->info.op_tmpl, sizeof(struct spi_mem_op));
	dev_dbg(ospi->dev, "%s len = 0x%zx offs = 0x%llx buf = 0x%p\n", __func__, len, offs, buf);

	op.data.nbytes = len;
	op.addr.val = desc->info.offset + offs;
	op.data.buf.in = buf;

	/*
	 * NOTE(review): the "+ 1" makes the bound one byte stricter than
	 * end-exclusive "addr + nbytes <= mm_size" — presumably deliberate
	 * margin for the prefetcher; confirm against the reference manual.
	 */
	addr_max = op.addr.val + op.data.nbytes + 1;
	if (addr_max < ospi->mm_size && op.addr.buswidth)
		ospi->fmode = CR_FMODE_MM;
	else
		ospi->fmode = CR_FMODE_INDR;

	ret = stm32_ospi_send(desc->mem->spi, &op);
	mutex_unlock(&ospi->lock);

	pm_runtime_put_autosuspend(ospi->dev);

	return ret ?: len;
}
656 
/*
 * stm32_ospi_transfer_one_message - generic SPI message path
 *
 * Used for non-spi-mem clients; requires a GPIO chip-select since the
 * controller's native CS is driven by the flash command engine.  Each
 * transfer is converted into a spi_mem_op and executed in indirect
 * mode; a dummy_data transfer is folded into the following transfer's
 * dummy phase to match the controller's command model.
 *
 * Return: 0 on success, negative errno otherwise (also stored in
 * msg->status).
 */
static int stm32_ospi_transfer_one_message(struct spi_controller *ctrl,
					   struct spi_message *msg)
{
	struct stm32_ospi *ospi = spi_controller_get_devdata(ctrl);
	struct spi_transfer *transfer;
	struct spi_device *spi = msg->spi;
	struct spi_mem_op op;
	struct gpio_desc *cs_gpiod = spi->cs_gpiod[ffs(spi->cs_index_mask) - 1];
	int ret = 0;

	if (!cs_gpiod)
		return -EOPNOTSUPP;

	ret = pm_runtime_resume_and_get(ospi->dev);
	if (ret < 0)
		return ret;

	mutex_lock(&ospi->lock);

	/* assert the GPIO chip-select for the whole message */
	gpiod_set_value_cansleep(cs_gpiod, true);

	list_for_each_entry(transfer, &msg->transfers, transfer_list) {
		u8 dummy_bytes = 0;

		memset(&op, 0, sizeof(op));

		dev_dbg(ospi->dev, "tx_buf:%p tx_nbits:%d rx_buf:%p rx_nbits:%d len:%d dummy_data:%d\n",
			transfer->tx_buf, transfer->tx_nbits,
			transfer->rx_buf, transfer->rx_nbits,
			transfer->len, transfer->dummy_data);

		/*
		 * OSPI hardware supports dummy bytes transfer.
		 * If current transfer is dummy byte, merge it with the next
		 * transfer in order to take into account OSPI block constraint
		 */
		if (transfer->dummy_data) {
			op.dummy.buswidth = transfer->tx_nbits;
			op.dummy.nbytes = transfer->len;
			dummy_bytes = transfer->len;

			/* If happens, means that message is not correctly built */
			if (list_is_last(&transfer->transfer_list, &msg->transfers)) {
				ret = -EINVAL;
				goto end_of_transfer;
			}

			transfer = list_next_entry(transfer, transfer_list);
		}

		op.data.nbytes = transfer->len;

		if (transfer->rx_buf) {
			ospi->fmode = CR_FMODE_INDR;
			op.data.buswidth = transfer->rx_nbits;
			op.data.dir = SPI_MEM_DATA_IN;
			op.data.buf.in = transfer->rx_buf;
		} else {
			ospi->fmode = CR_FMODE_INDW;
			op.data.buswidth = transfer->tx_nbits;
			op.data.dir = SPI_MEM_DATA_OUT;
			op.data.buf.out = transfer->tx_buf;
		}

		ret = stm32_ospi_send(spi, &op);
		if (ret)
			goto end_of_transfer;

		msg->actual_length += transfer->len + dummy_bytes;
	}

end_of_transfer:
	gpiod_set_value_cansleep(cs_gpiod, false);

	mutex_unlock(&ospi->lock);

	msg->status = ret;
	spi_finalize_current_message(ctrl);

	pm_runtime_put_autosuspend(ospi->dev);

	return ret;
}
740 
/*
 * stm32_ospi_setup - per-device setup callback
 *
 * Computes and caches the clock prescaler for this chip-select from
 * spi->max_speed_hz, then enables the controller with automatic-poll
 * mode stop (APMS) and a FIFO threshold, and sets the device size to
 * its maximum.  The programmed CR/DCR1 values are saved for restore on
 * system resume.
 *
 * Return: 0 on success, negative errno otherwise.
 */
static int stm32_ospi_setup(struct spi_device *spi)
{
	struct spi_controller *ctrl = spi->controller;
	struct stm32_ospi *ospi = spi_controller_get_devdata(ctrl);
	void __iomem *regs_base = ospi->regs_base;
	int ret;
	u8 cs = spi->chip_select[ffs(spi->cs_index_mask) - 1];

	if (ctrl->busy)
		return -EBUSY;

	if (!spi->max_speed_hz)
		return -EINVAL;

	ret = pm_runtime_resume_and_get(ospi->dev);
	if (ret < 0)
		return ret;

	/* prescaler value is the divider minus one */
	ospi->flash_presc[cs] = DIV_ROUND_UP(ospi->clk_rate, spi->max_speed_hz) - 1;

	mutex_lock(&ospi->lock);

	/* enable the block with a FIFO threshold of 4 bytes (FTHRES = 3) */
	ospi->cr_reg = CR_APMS | 3 << CR_FTHRES_SHIFT | CR_EN;
	writel_relaxed(ospi->cr_reg, regs_base + OSPI_CR);

	/* set dcr fsize to max address */
	ospi->dcr_reg = DCR1_DEVSIZE_MASK | DCR1_DLYBYP;
	writel_relaxed(ospi->dcr_reg, regs_base + OSPI_DCR1);

	mutex_unlock(&ospi->lock);

	pm_runtime_put_autosuspend(ospi->dev);

	return 0;
}
776 
777 /*
778  * No special host constraint, so use default spi_mem_default_supports_op
779  * to check supported mode.
780  */
781 static const struct spi_controller_mem_ops stm32_ospi_mem_ops = {
782 	.exec_op	= stm32_ospi_exec_op,
783 	.dirmap_create	= stm32_ospi_dirmap_create,
784 	.dirmap_read	= stm32_ospi_dirmap_read,
785 	.poll_status	= stm32_ospi_poll_status,
786 };
787 
/*
 * stm32_ospi_get_resources - gather registers, clock, IRQ, reset, DMA
 * channels and the optional reserved memory-map region
 *
 * DMA channels are optional: any request failure other than
 * -EPROBE_DEFER leaves the corresponding channel NULL and the driver
 * falls back to PIO.  The memory-map region is also optional.
 *
 * Return: 0 on success, negative errno otherwise.
 */
static int stm32_ospi_get_resources(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct stm32_ospi *ospi = platform_get_drvdata(pdev);
	struct resource *res, _res;
	int ret;

	ospi->regs_base = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
	if (IS_ERR(ospi->regs_base))
		return PTR_ERR(ospi->regs_base);

	/* physical base is needed later to compute the DMA FIFO address */
	ospi->regs_phys_base = res->start;

	ospi->clk = devm_clk_get(dev, NULL);
	if (IS_ERR(ospi->clk))
		return dev_err_probe(dev, PTR_ERR(ospi->clk),
				     "Can't get clock\n");

	ospi->clk_rate = clk_get_rate(ospi->clk);
	if (!ospi->clk_rate) {
		dev_err(dev, "Invalid clock rate\n");
		return -EINVAL;
	}

	ospi->irq = platform_get_irq(pdev, 0);
	if (ospi->irq < 0)
		return ospi->irq;

	ret = devm_request_irq(dev, ospi->irq, stm32_ospi_irq, 0,
			       dev_name(dev), ospi);
	if (ret) {
		dev_err(dev, "Failed to request irq\n");
		return ret;
	}

	/* "released" variant: the reset is acquired in probe/resume */
	ospi->rstc = devm_reset_control_array_get_exclusive_released(dev);
	if (IS_ERR(ospi->rstc))
		return dev_err_probe(dev, PTR_ERR(ospi->rstc),
				     "Can't get reset\n");

	ospi->dma_chrx = dma_request_chan(dev, "rx");
	if (IS_ERR(ospi->dma_chrx)) {
		ret = PTR_ERR(ospi->dma_chrx);
		ospi->dma_chrx = NULL;
		if (ret == -EPROBE_DEFER)
			goto err_dma;
	}

	ospi->dma_chtx = dma_request_chan(dev, "tx");
	if (IS_ERR(ospi->dma_chtx)) {
		ret = PTR_ERR(ospi->dma_chtx);
		ospi->dma_chtx = NULL;
		if (ret == -EPROBE_DEFER)
			goto err_dma;
	}

	res = &_res;
	ret = of_reserved_mem_region_to_resource(dev->of_node, 0, res);
	if (!ret) {
		ospi->mm_size = resource_size(res);
		ospi->mm_base = devm_ioremap_resource(dev, res);
		if (IS_ERR(ospi->mm_base)) {
			dev_err(dev, "unable to map memory region: %pR\n", res);
			ret = PTR_ERR(ospi->mm_base);
			goto err_dma;
		}

		if (ospi->mm_size > STM32_OSPI_MAX_MMAP_SZ) {
			dev_err(dev, "Memory map size outsize bounds\n");
			ret = -EINVAL;
			goto err_dma;
		}
	} else {
		dev_info(dev, "No memory-map region found\n");
	}

	init_completion(&ospi->match_completion);

	return 0;

err_dma:
	dev_info(dev, "Can't get all resources (%d)\n", ret);

	if (ospi->dma_chtx)
		dma_release_channel(ospi->dma_chtx);
	if (ospi->dma_chrx)
		dma_release_channel(ospi->dma_chrx);

	return ret;
};
878 
/*
 * stm32_ospi_probe - driver probe
 *
 * Validates the DT flash subnodes, allocates the controller, gathers
 * resources, configures DMA, enables runtime PM, pulses the block reset
 * and registers the SPI controller.
 *
 * Return: 0 on success, negative errno otherwise.
 */
static int stm32_ospi_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct spi_controller *ctrl;
	struct stm32_ospi *ospi;
	struct dma_slave_config dma_cfg;
	struct device_node *child;
	int ret;
	u8 spi_flash_count = 0;

	/*
	 * Flash subnodes sanity check:
	 *        1 or 2 spi-nand/spi-nor flashes		=> supported
	 *	  All other flash node configuration		=> not supported
	 */
	for_each_available_child_of_node(dev->of_node, child) {
		if (of_device_is_compatible(child, "jedec,spi-nor") ||
		    of_device_is_compatible(child, "spi-nand"))
			spi_flash_count++;
	}

	if (spi_flash_count == 0 || spi_flash_count > 2) {
		dev_err(dev, "Incorrect DT flash node\n");
		return -ENODEV;
	}

	ctrl = devm_spi_alloc_host(dev, sizeof(*ospi));
	if (!ctrl)
		return -ENOMEM;

	ospi = spi_controller_get_devdata(ctrl);
	ospi->ctrl = ctrl;

	ospi->dev = &pdev->dev;
	platform_set_drvdata(pdev, ospi);

	ret = stm32_ospi_get_resources(pdev);
	if (ret)
		return ret;

	/* both directions target the controller data FIFO, one byte wide */
	memset(&dma_cfg, 0, sizeof(dma_cfg));
	dma_cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
	dma_cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
	dma_cfg.src_addr = ospi->regs_phys_base + OSPI_DR;
	dma_cfg.dst_addr = ospi->regs_phys_base + OSPI_DR;
	ret = stm32_ospi_dma_setup(ospi, &dma_cfg);
	if (ret)
		goto err_dma_free;

	mutex_init(&ospi->lock);

	ctrl->mode_bits = SPI_RX_DUAL | SPI_RX_QUAD |
			  SPI_TX_DUAL | SPI_TX_QUAD |
			  SPI_TX_OCTAL | SPI_RX_OCTAL;
	ctrl->flags = SPI_CONTROLLER_HALF_DUPLEX;
	ctrl->setup = stm32_ospi_setup;
	ctrl->bus_num = -1;
	ctrl->mem_ops = &stm32_ospi_mem_ops;
	ctrl->use_gpio_descriptors = true;
	ctrl->transfer_one_message = stm32_ospi_transfer_one_message;
	ctrl->num_chipselect = STM32_OSPI_MAX_NORCHIP;

	pm_runtime_enable(ospi->dev);
	pm_runtime_set_autosuspend_delay(ospi->dev, STM32_AUTOSUSPEND_DELAY);
	pm_runtime_use_autosuspend(ospi->dev);

	ret = pm_runtime_resume_and_get(ospi->dev);
	if (ret < 0)
		goto err_pm_enable;

	ret = reset_control_acquire(ospi->rstc);
	if (ret) {
		dev_err_probe(dev, ret, "Can not acquire reset %d\n", ret);
		goto err_pm_resume;
	}

	/* pulse the block reset to start from a known hardware state */
	reset_control_assert(ospi->rstc);
	udelay(2);
	reset_control_deassert(ospi->rstc);

	ret = spi_register_controller(ctrl);
	if (ret) {
		/* Disable ospi */
		writel_relaxed(0, ospi->regs_base + OSPI_CR);
		goto err_reset_control;
	}

	pm_runtime_put_autosuspend(ospi->dev);

	return 0;

err_reset_control:
	reset_control_release(ospi->rstc);
err_pm_resume:
	pm_runtime_put_sync_suspend(ospi->dev);

err_pm_enable:
	pm_runtime_force_suspend(ospi->dev);
	mutex_destroy(&ospi->lock);
err_dma_free:
	if (ospi->dma_chtx)
		dma_release_channel(ospi->dma_chtx);
	if (ospi->dma_chrx)
		dma_release_channel(ospi->dma_chrx);

	return ret;
}
986 
/*
 * stm32_ospi_remove - driver removal, mirrors probe in reverse order
 *
 * Resumes the device so the register write is valid, unregisters the
 * controller, disables the block and releases DMA channels and reset.
 */
static void stm32_ospi_remove(struct platform_device *pdev)
{
	struct stm32_ospi *ospi = platform_get_drvdata(pdev);

	pm_runtime_resume_and_get(ospi->dev);

	spi_unregister_controller(ospi->ctrl);
	/* Disable ospi */
	writel_relaxed(0, ospi->regs_base + OSPI_CR);
	mutex_destroy(&ospi->lock);

	if (ospi->dma_chtx)
		dma_release_channel(ospi->dma_chtx);
	if (ospi->dma_chrx)
		dma_release_channel(ospi->dma_chrx);

	reset_control_release(ospi->rstc);

	pm_runtime_put_sync_suspend(ospi->dev);
	pm_runtime_force_suspend(ospi->dev);
}
1008 
/*
 * stm32_ospi_suspend - system sleep entry
 *
 * Selects the sleep pinctrl state, releases the reset line (so resume
 * can re-acquire it) and force-suspends runtime PM, which gates the
 * clock via stm32_ospi_runtime_suspend().
 */
static int stm32_ospi_suspend(struct device *dev)
{
	struct stm32_ospi *ospi = dev_get_drvdata(dev);

	pinctrl_pm_select_sleep_state(dev);

	reset_control_release(ospi->rstc);

	return pm_runtime_force_suspend(ospi->dev);
}
1019 
stm32_ospi_resume(struct device * dev)1020 static int stm32_ospi_resume(struct device *dev)
1021 {
1022 	struct stm32_ospi *ospi = dev_get_drvdata(dev);
1023 	void __iomem *regs_base = ospi->regs_base;
1024 	int ret;
1025 
1026 	ret = pm_runtime_force_resume(ospi->dev);
1027 	if (ret < 0)
1028 		return ret;
1029 
1030 	pinctrl_pm_select_default_state(dev);
1031 
1032 	ret = pm_runtime_resume_and_get(ospi->dev);
1033 	if (ret < 0)
1034 		return ret;
1035 
1036 	ret = reset_control_acquire(ospi->rstc);
1037 	if (ret) {
1038 		dev_err(dev, "Can not acquire reset\n");
1039 		return ret;
1040 	}
1041 
1042 	writel_relaxed(ospi->cr_reg, regs_base + OSPI_CR);
1043 	writel_relaxed(ospi->dcr_reg, regs_base + OSPI_DCR1);
1044 	pm_runtime_put_autosuspend(ospi->dev);
1045 
1046 	return 0;
1047 }
1048 
/* Runtime suspend: gate the OSPI kernel clock */
static int stm32_ospi_runtime_suspend(struct device *dev)
{
	struct stm32_ospi *ospi = dev_get_drvdata(dev);

	clk_disable_unprepare(ospi->clk);

	return 0;
}
1057 
/* Runtime resume: ungate the OSPI kernel clock */
static int stm32_ospi_runtime_resume(struct device *dev)
{
	struct stm32_ospi *ospi = dev_get_drvdata(dev);

	return clk_prepare_enable(ospi->clk);
}
1064 
/* System sleep does a full save/restore; runtime PM only gates the clock */
static const struct dev_pm_ops stm32_ospi_pm_ops = {
	SYSTEM_SLEEP_PM_OPS(stm32_ospi_suspend, stm32_ospi_resume)
	RUNTIME_PM_OPS(stm32_ospi_runtime_suspend, stm32_ospi_runtime_resume, NULL)
};

static const struct of_device_id stm32_ospi_of_match[] = {
	{ .compatible = "st,stm32mp25-ospi" },
	{},
};
MODULE_DEVICE_TABLE(of, stm32_ospi_of_match);

static struct platform_driver stm32_ospi_driver = {
	.probe	= stm32_ospi_probe,
	.remove	= stm32_ospi_remove,
	.driver	= {
		.name = "stm32-ospi",
		.pm = pm_ptr(&stm32_ospi_pm_ops),
		.of_match_table = stm32_ospi_of_match,
	},
};
module_platform_driver(stm32_ospi_driver);

MODULE_DESCRIPTION("STMicroelectronics STM32 OCTO SPI driver");
MODULE_LICENSE("GPL");
1089