xref: /linux/drivers/spi/spi-axiado.c (revision c17ee635fd3a482b2ad2bf5e269755c2eae5f25e)
1 // SPDX-License-Identifier: GPL-2.0-or-later
2 //
3 // Axiado SPI controller driver (Host mode only)
4 //
5 // Copyright (C) 2022-2025 Axiado Corporation (or its affiliates).
6 //
7 
8 #include <linux/clk.h>
9 #include <linux/delay.h>
10 #include <linux/gpio/consumer.h>
11 #include <linux/interrupt.h>
12 #include <linux/io.h>
13 #include <linux/module.h>
14 #include <linux/of_irq.h>
15 #include <linux/of_address.h>
16 #include <linux/platform_device.h>
17 #include <linux/pm_runtime.h>
18 #include <linux/spi/spi.h>
19 #include <linux/spi/spi-mem.h>
20 #include <linux/sizes.h>
21 
22 #include "spi-axiado.h"
23 
24 /**
25  * ax_spi_read - Register Read - 32 bit per word
26  * @xspi:	Pointer to the ax_spi structure
27  * @offset:	Register offset address
28  *
29  * @return:	Returns the value of that register
30  */
31 static inline u32 ax_spi_read(struct ax_spi *xspi, u32 offset)
32 {
33 	return readl_relaxed(xspi->regs + offset);
34 }
35 
36 /**
37  * ax_spi_write - Register write - 32 bit per word
38  * @xspi:	Pointer to the ax_spi structure
39  * @offset:	Register offset address
40  * @val:	Value to write into that register
41  */
42 static inline void ax_spi_write(struct ax_spi *xspi, u32 offset, u32 val)
43 {
44 	writel_relaxed(val, xspi->regs + offset);
45 }
46 
/**
 * ax_spi_write_b - Register write - 8 bit per word
 * @xspi:	Pointer to the ax_spi structure
 * @offset:	Register offset address
 * @val:	Value to write into that register
 */
static inline void ax_spi_write_b(struct ax_spi *xspi, u32 offset, u8 val)
{
	writeb_relaxed(val, xspi->regs + offset);
}
57 
58 /**
59  * ax_spi_init_hw - Initialize the hardware and configure the SPI controller
60  * @xspi:	Pointer to the ax_spi structure
61  *
62  * * On reset the SPI controller is configured to be in host mode.
63  * In host mode baud rate divisor is set to 4, threshold value for TX FIFO
64  * not full interrupt is set to 1 and size of the word to be transferred as 8 bit.
65  *
66  * This function initializes the SPI controller to disable and clear all the
67  * interrupts, enable manual target select and manual start, deselect all the
68  * chip select lines, and enable the SPI controller.
69  */
70 static void ax_spi_init_hw(struct ax_spi *xspi)
71 {
72 	u32 reg_value;
73 
74 	/* Clear CR1 */
75 	ax_spi_write(xspi, AX_SPI_CR1, AX_SPI_CR1_CLR);
76 
77 	/* CR1 - CPO CHP MSS SCE SCR */
78 	reg_value = ax_spi_read(xspi, AX_SPI_CR1);
79 	reg_value |= AX_SPI_CR1_SCR | AX_SPI_CR1_SCE;
80 
81 	ax_spi_write(xspi, AX_SPI_CR1, reg_value);
82 
83 	/* CR2 - MTE SRD SWD SSO */
84 	reg_value = ax_spi_read(xspi, AX_SPI_CR2);
85 	reg_value |= AX_SPI_CR2_SWD | AX_SPI_CR2_SRD;
86 
87 	ax_spi_write(xspi, AX_SPI_CR2, reg_value);
88 
89 	/* CR3 - Reserverd bits S3W SDL */
90 	ax_spi_write(xspi, AX_SPI_CR3, AX_SPI_CR3_SDL);
91 
92 	/* SCDR - Reserved bits SCS SCD */
93 	ax_spi_write(xspi, AX_SPI_SCDR, (AX_SPI_SCDR_SCS | AX_SPI_SCD_DEFAULT));
94 
95 	/* IMR */
96 	ax_spi_write(xspi, AX_SPI_IMR, AX_SPI_IMR_CLR);
97 
98 	/* ISR - Clear all the interrupt */
99 	ax_spi_write(xspi, AX_SPI_ISR, AX_SPI_ISR_CLR);
100 }
101 
/**
 * ax_spi_chipselect - Select or deselect the chip select line
 * @spi:	Pointer to the spi_device structure
 * @is_high:	Select(0) or deselect (1) the chip select line
 *
 * NOTE(review): @is_high is not consulted; this function always programs
 * the device's chip-select index into CR2 regardless of assert/deassert —
 * confirm whether the hardware toggles the line from the TS field alone.
 */
static void ax_spi_chipselect(struct spi_device *spi, bool is_high)
{
	struct ax_spi *xspi = spi_controller_get_devdata(spi->controller);
	u32 ctrl_reg;

	ctrl_reg = ax_spi_read(xspi, AX_SPI_CR2);
	/* Reset the chip select */
	ctrl_reg &= ~AX_SPI_DEFAULT_TS_MASK;
	/* Program this device's chip-select index into the TS field */
	ctrl_reg |= spi_get_chipselect(spi, 0);

	ax_spi_write(xspi, AX_SPI_CR2, ctrl_reg);
}
119 
120 /**
121  * ax_spi_config_clock_mode - Sets clock polarity and phase
122  * @spi:	Pointer to the spi_device structure
123  *
124  * Sets the requested clock polarity and phase.
125  */
126 static void ax_spi_config_clock_mode(struct spi_device *spi)
127 {
128 	struct ax_spi *xspi = spi_controller_get_devdata(spi->controller);
129 	u32 ctrl_reg, new_ctrl_reg;
130 
131 	new_ctrl_reg = ax_spi_read(xspi, AX_SPI_CR1);
132 	ctrl_reg = new_ctrl_reg;
133 
134 	/* Set the SPI clock phase and clock polarity */
135 	new_ctrl_reg &= ~(AX_SPI_CR1_CPHA | AX_SPI_CR1_CPOL);
136 	if (spi->mode & SPI_CPHA)
137 		new_ctrl_reg |= AX_SPI_CR1_CPHA;
138 	if (spi->mode & SPI_CPOL)
139 		new_ctrl_reg |= AX_SPI_CR1_CPOL;
140 
141 	if (new_ctrl_reg != ctrl_reg)
142 		ax_spi_write(xspi, AX_SPI_CR1, new_ctrl_reg);
143 	ax_spi_write(xspi, AX_SPI_CR1, 0x03);
144 }
145 
/**
 * ax_spi_config_clock_freq - Program the SPI clock divider
 * @spi:	Pointer to the spi_device structure
 * @transfer:	Pointer to the spi_transfer structure which provides
 *		information about next transfer setup parameters
 *
 * Programs the clock divider register (SCDR) with the driver's default
 * divisor.
 *
 * NOTE(review): @transfer is accepted but never consulted — the requested
 * transfer->speed_hz is ignored and the controller always runs at the
 * default divisor written below.  Confirm whether per-transfer frequency
 * selection (as the original comment describing prescaler matching
 * implied) was intended here.
 */
static void ax_spi_config_clock_freq(struct spi_device *spi,
				     struct spi_transfer *transfer)
{
	struct ax_spi *xspi = spi_controller_get_devdata(spi->controller);

	ax_spi_write(xspi, AX_SPI_SCDR, (AX_SPI_SCDR_SCS | AX_SPI_SCD_DEFAULT));
}
167 
168 /**
169  * ax_spi_setup_transfer - Configure SPI controller for specified transfer
170  * @spi:	Pointer to the spi_device structure
171  * @transfer:	Pointer to the spi_transfer structure which provides
172  *		information about next transfer setup parameters
173  *
174  * Sets the operational mode of SPI controller for the next SPI transfer and
175  * sets the requested clock frequency.
176  *
177  */
178 static void ax_spi_setup_transfer(struct spi_device *spi,
179 				 struct spi_transfer *transfer)
180 {
181 	struct ax_spi *xspi = spi_controller_get_devdata(spi->controller);
182 
183 	ax_spi_config_clock_freq(spi, transfer);
184 
185 	dev_dbg(&spi->dev, "%s, mode %d, %u bits/w, %u clock speed\n",
186 		__func__, spi->mode, spi->bits_per_word,
187 		xspi->speed_hz);
188 }
189 
190 /**
191  * ax_spi_fill_tx_fifo - Fills the TX FIFO with as many bytes as possible
192  * @xspi:	Pointer to the ax_spi structure
193  */
194 static void ax_spi_fill_tx_fifo(struct ax_spi *xspi)
195 {
196 	unsigned long trans_cnt = 0;
197 
198 	while ((trans_cnt < xspi->tx_fifo_depth) &&
199 	       (xspi->tx_bytes > 0)) {
200 		/* When xspi in busy condition, bytes may send failed,
201 		 * then spi control did't work thoroughly, add one byte delay
202 		 */
203 		if (ax_spi_read(xspi, AX_SPI_IVR) & AX_SPI_IVR_TFOV)
204 			usleep_range(10, 10);
205 		if (xspi->tx_buf)
206 			ax_spi_write_b(xspi, AX_SPI_TXFIFO, *xspi->tx_buf++);
207 		else
208 			ax_spi_write_b(xspi, AX_SPI_TXFIFO, 0);
209 
210 		xspi->tx_bytes--;
211 		trans_cnt++;
212 	}
213 }
214 
215 /**
216  * ax_spi_get_rx_byte - Gets a byte from the RX FIFO buffer
217  * @xspi: Controller private data (struct ax_spi *)
218  *
219  * This function handles the logic of extracting bytes from the 32-bit RX FIFO.
220  * It reads a new 32-bit word from AX_SPI_RXFIFO only when the current buffered
221  * word has been fully processed (all 4 bytes extracted). It then extracts
222  * bytes one by one, assuming the controller is little-endian.
223  *
224  * Returns: The next 8-bit byte read from the RX FIFO stream.
225  */
226 static u8 ax_spi_get_rx_byte_for_irq(struct ax_spi *xspi)
227 {
228 	u8 byte_val;
229 
230 	/* If all bytes from the current 32-bit word have been extracted,
231 	 * read a new word from the hardware RX FIFO.
232 	 */
233 	if (xspi->bytes_left_in_current_rx_word_for_irq == 0) {
234 		xspi->current_rx_fifo_word_for_irq = ax_spi_read(xspi, AX_SPI_RXFIFO);
235 		xspi->bytes_left_in_current_rx_word_for_irq = 4; // A new 32-bit word has 4 bytes
236 	}
237 
238 	/* Extract the least significant byte from the current 32-bit word */
239 	byte_val = (u8)(xspi->current_rx_fifo_word_for_irq & 0xFF);
240 
241 	/* Shift the word right by 8 bits to prepare the next byte for extraction */
242 	xspi->current_rx_fifo_word_for_irq >>= 8;
243 	xspi->bytes_left_in_current_rx_word_for_irq--;
244 
245 	return byte_val;
246 }
247 
/*
 * ax_spi_process_rx_and_finalize - drain received bytes and, when all
 * expected RX bytes have been copied or discarded, finalize the transfer.
 *
 * Centralizes the completion logic shared by the MTC and RX-threshold
 * interrupt paths.  Returns true if the transfer was finalized.
 */
static bool ax_spi_process_rx_and_finalize(struct spi_controller *ctlr)
{
	struct ax_spi *xspi = spi_controller_get_devdata(ctlr);

	/* Number of bytes the hardware reports as available in the RX FIFO */
	u32 avail_bytes = ax_spi_read(xspi, AX_SPI_RX_FBCAR);

	/* First consume bytes already staged from a previously read word */
	while (xspi->bytes_left_in_current_rx_word_for_irq &&
	       (xspi->rx_copy_remaining || xspi->rx_discard)) {
		u8 b = ax_spi_get_rx_byte_for_irq(xspi);

		if (xspi->rx_discard) {
			xspi->rx_discard--;
		} else {
			*xspi->rx_buf++ = b;
			xspi->rx_copy_remaining--;
		}
	}

	/* Then process whole 32-bit words directly from the FIFO */
	while (avail_bytes >= 4 && (xspi->rx_copy_remaining || xspi->rx_discard)) {
		/* ax_spi_get_rx_byte_for_irq fetches a fresh word when needed */
		u8 b = ax_spi_get_rx_byte_for_irq(xspi);

		if (xspi->rx_discard) {
			xspi->rx_discard--;
		} else {
			*xspi->rx_buf++ = b;
			xspi->rx_copy_remaining--;
		}
		/*
		 * bytes_left == 3 exactly when a new word was just pulled
		 * from the hardware FIFO, so account for the four bytes
		 * consumed on the hardware side at that point.
		 */
		if (xspi->bytes_left_in_current_rx_word_for_irq == 3)
			avail_bytes -= 4;
	}

	/*
	 * Completion check: the transfer is done once every expected RX
	 * byte has been copied to the caller's buffer or discarded.
	 */
	if (xspi->rx_copy_remaining == 0 && xspi->rx_discard == 0) {
		/*
		 * Defensive drain: discard any leftover bytes in the HW
		 * FIFO so they cannot corrupt the next transfer.  Bounded
		 * so a wedged FIFO counter cannot hang the IRQ handler.
		 */
		int safety_words = AX_SPI_RX_FIFO_DRAIN_LIMIT;

		while (ax_spi_read(xspi, AX_SPI_RX_FBCAR) > 0 && safety_words-- > 0)
			ax_spi_read(xspi, AX_SPI_RXFIFO);

		/* Disable all interrupts for this transfer and finalize. */
		ax_spi_write(xspi, AX_SPI_IMR, 0x00);
		spi_finalize_current_transfer(ctlr);
		return true;
	}

	return false;
}
313 
314 /**
315  * ax_spi_irq - Interrupt service routine of the SPI controller
316  * @irq:	IRQ number
317  * @dev_id:	Pointer to the xspi structure
318  *
319  * This function handles RX FIFO almost full and Host Transfer Completed interrupts only.
320  * On RX FIFO amlost full interrupt this function reads the received data from RX FIFO and
321  * fills the TX FIFO if there is any data remaining to be transferred.
322  * On Host Transfer Completed interrupt this function indicates that transfer is completed,
323  * the SPI subsystem will clear MTC bit.
324  *
325  * Return:	IRQ_HANDLED when handled; IRQ_NONE otherwise.
326  */
327 static irqreturn_t ax_spi_irq(int irq, void *dev_id)
328 {
329 	struct spi_controller *ctlr = dev_id;
330 	struct ax_spi *xspi = spi_controller_get_devdata(ctlr);
331 	u32 intr_status;
332 
333 	intr_status = ax_spi_read(xspi, AX_SPI_IVR);
334 	if (!intr_status)
335 		return IRQ_NONE;
336 
337 	/* Handle "Message Transfer Complete" interrupt.
338 	 * This means all bytes have been shifted out of the TX FIFO.
339 	 * It's time to harvest the final incoming bytes from the RX FIFO.
340 	 */
341 	if (intr_status & AX_SPI_IVR_MTCV) {
342 		/* Clear the MTC interrupt flag immediately. */
343 		ax_spi_write(xspi, AX_SPI_ISR, AX_SPI_ISR_MTC);
344 
345 		/* For a TX-only transfer, rx_buf would be NULL.
346 		 * In the spi-core, rx_copy_remaining would be 0.
347 		 * So we can finalize immediately.
348 		 */
349 		if (!xspi->rx_buf) {
350 			ax_spi_write(xspi, AX_SPI_IMR, 0x00);
351 			spi_finalize_current_transfer(ctlr);
352 			return IRQ_HANDLED;
353 		}
354 		/* For a full-duplex transfer, process any remaining RX data.
355 		 * The helper function will handle finalization if everything is received.
356 		 */
357 		ax_spi_process_rx_and_finalize(ctlr);
358 		return IRQ_HANDLED;
359 	}
360 
361 	/* Handle "RX FIFO Full / Threshold Met" interrupt.
362 	 * This means we need to make space in the RX FIFO by reading from it.
363 	 */
364 	if (intr_status & AX_SPI_IVR_RFFV) {
365 		if (ax_spi_process_rx_and_finalize(ctlr)) {
366 			/* Transfer was finalized inside the helper, we are done. */
367 		} else {
368 			/* RX is not yet complete. If there are still TX bytes to send
369 			 * (for very long transfers), we can fill the TX FIFO again.
370 			 */
371 			if (xspi->tx_bytes)
372 				ax_spi_fill_tx_fifo(xspi);
373 		}
374 		return IRQ_HANDLED;
375 	}
376 
377 	return IRQ_NONE;
378 }
379 
/**
 * ax_prepare_message - Per-message setup callback for the SPI core
 * @ctlr:	Pointer to the spi_controller structure
 * @msg:	Message about to be transferred
 *
 * Applies the device's clock polarity/phase configuration before the
 * message is started.
 *
 * Return:	0 always
 */
static int ax_prepare_message(struct spi_controller *ctlr,
			      struct spi_message *msg)
{
	ax_spi_config_clock_mode(msg->spi);
	return 0;
}
386 
387 /**
388  * ax_transfer_one - Initiates the SPI transfer
389  * @ctlr:	Pointer to spi_controller structure
390  * @spi:	Pointer to the spi_device structure
391  * @transfer:	Pointer to the spi_transfer structure which provides
392  *		information about next transfer parameters
393  *
394  * This function fills the TX FIFO, starts the SPI transfer and
395  * returns a positive transfer count so that core will wait for completion.
396  *
397  * Return:	Number of bytes transferred in the last transfer
398  */
399 static int ax_transfer_one(struct spi_controller *ctlr,
400 			   struct spi_device *spi,
401 			   struct spi_transfer *transfer)
402 {
403 	struct ax_spi *xspi = spi_controller_get_devdata(ctlr);
404 	int drain_limit;
405 
406 	/* Pre-transfer cleanup:Flush the RX FIFO to discard any stale data.
407 	 * This is the crucial part. Before every new transfer, we must ensure
408 	 * the HW is in a clean state to avoid processing stale data
409 	 * from a previous, possibly failed or interrupted, transfer.
410 	 */
411 	drain_limit = AX_SPI_RX_FIFO_DRAIN_LIMIT; // Sane limit to prevent infinite loop on HW error
412 	while (ax_spi_read(xspi, AX_SPI_RX_FBCAR) > 0 && drain_limit-- > 0)
413 		ax_spi_read(xspi, AX_SPI_RXFIFO); // Read and discard
414 
415 	if (drain_limit <= 0)
416 		dev_warn(&ctlr->dev, "RX FIFO drain timeout before transfer\n");
417 
418 	/* Clear any stale interrupt flags from a previous transfer.
419 	 * This prevents an immediate, false interrupt trigger.
420 	 */
421 	ax_spi_write(xspi, AX_SPI_ISR, AX_SPI_ISR_CLR);
422 
423 	xspi->tx_buf = transfer->tx_buf;
424 	xspi->rx_buf = transfer->rx_buf;
425 	xspi->tx_bytes = transfer->len;
426 	xspi->rx_bytes = transfer->len;
427 
428 	/* Reset RX 32-bit to byte buffer for each new transfer */
429 	if (transfer->tx_buf && !transfer->rx_buf) {
430 		/* TX mode: discard all received data */
431 		xspi->rx_discard = transfer->len;
432 		xspi->rx_copy_remaining = 0;
433 	} else if ((!transfer->tx_buf && transfer->rx_buf) ||
434 		   (transfer->tx_buf && transfer->rx_buf)) {
435 		/* RX mode: generate clock by filling TX FIFO with dummy bytes
436 		 * Full-duplex mode: generate clock by filling TX FIFO
437 		 */
438 		xspi->rx_discard = 0;
439 		xspi->rx_copy_remaining = transfer->len;
440 	} else {
441 		/* No TX and RX */
442 		xspi->rx_discard = 0;
443 		xspi->rx_copy_remaining = transfer->len;
444 	}
445 
446 	ax_spi_setup_transfer(spi, transfer);
447 	ax_spi_fill_tx_fifo(xspi);
448 	ax_spi_write(xspi, AX_SPI_CR2, (AX_SPI_CR2_HTE | AX_SPI_CR2_SRD | AX_SPI_CR2_SWD));
449 
450 	ax_spi_write(xspi, AX_SPI_IMR, (AX_SPI_IMR_MTCM | AX_SPI_IMR_RFFM));
451 	return transfer->len;
452 }
453 
454 /**
455  * ax_prepare_transfer_hardware - Prepares hardware for transfer.
456  * @ctlr:	Pointer to the spi_controller structure which provides
457  *		information about the controller.
458  *
459  * This function enables SPI host controller.
460  *
461  * Return:	0 always
462  */
463 static int ax_prepare_transfer_hardware(struct spi_controller *ctlr)
464 {
465 	struct ax_spi *xspi = spi_controller_get_devdata(ctlr);
466 
467 	u32 reg_value;
468 
469 	reg_value = ax_spi_read(xspi, AX_SPI_CR1);
470 	reg_value |= AX_SPI_CR1_SCE;
471 
472 	ax_spi_write(xspi, AX_SPI_CR1, reg_value);
473 
474 	return 0;
475 }
476 
477 /**
478  * ax_unprepare_transfer_hardware - Relaxes hardware after transfer
479  * @ctlr:	Pointer to the spi_controller structure which provides
480  *		information about the controller.
481  *
482  * This function disables the SPI host controller when no target selected.
483  *
484  * Return:	0 always
485  */
486 static int ax_unprepare_transfer_hardware(struct spi_controller *ctlr)
487 {
488 	struct ax_spi *xspi = spi_controller_get_devdata(ctlr);
489 
490 	u32 reg_value;
491 
492 	/* Disable the SPI if target is deselected */
493 	reg_value = ax_spi_read(xspi, AX_SPI_CR1);
494 	reg_value &= ~AX_SPI_CR1_SCE;
495 
496 	ax_spi_write(xspi, AX_SPI_CR1, reg_value);
497 
498 	return 0;
499 }
500 
501 /**
502  * ax_spi_detect_fifo_depth - Detect the FIFO depth of the hardware
503  * @xspi:	Pointer to the ax_spi structure
504  *
505  * The depth of the TX FIFO is a synthesis configuration parameter of the SPI
506  * IP. The FIFO threshold register is sized so that its maximum value can be the
507  * FIFO size - 1. This is used to detect the size of the FIFO.
508  */
509 static void ax_spi_detect_fifo_depth(struct ax_spi *xspi)
510 {
511 	/* The MSBs will get truncated giving us the size of the FIFO */
512 	ax_spi_write(xspi, AX_SPI_TX_FAETR, ALMOST_EMPTY_TRESHOLD);
513 	xspi->tx_fifo_depth = FIFO_DEPTH;
514 
515 	/* Set the threshold limit */
516 	ax_spi_write(xspi, AX_SPI_TX_FAETR, ALMOST_EMPTY_TRESHOLD);
517 	ax_spi_write(xspi, AX_SPI_RX_FAFTR, ALMOST_FULL_TRESHOLD);
518 }
519 
520 /* --- Internal Helper Function for 32-bit RX FIFO Read --- */
521 /**
522  * ax_spi_get_rx_byte - Gets a byte from the RX FIFO buffer
523  * @xspi: Controller private data (struct ax_spi *)
524  *
525  * This function handles the logic of extracting bytes from the 32-bit RX FIFO.
526  * It reads a new 32-bit word from AX_SPI_RXFIFO only when the current buffered
527  * word has been fully processed (all 4 bytes extracted). It then extracts
528  * bytes one by one, assuming the controller is little-endian.
529  *
530  * Returns: The next 8-bit byte read from the RX FIFO stream.
531  */
532 static u8 ax_spi_get_rx_byte(struct ax_spi *xspi)
533 {
534 	u8 byte_val;
535 
536 	/* If all bytes from the current 32-bit word have been extracted,
537 	 * read a new word from the hardware RX FIFO.
538 	 */
539 	if (xspi->bytes_left_in_current_rx_word == 0) {
540 		xspi->current_rx_fifo_word = ax_spi_read(xspi, AX_SPI_RXFIFO);
541 		xspi->bytes_left_in_current_rx_word = 4; // A new 32-bit word has 4 bytes
542 	}
543 
544 	/* Extract the least significant byte from the current 32-bit word */
545 	byte_val = (u8)(xspi->current_rx_fifo_word & 0xFF);
546 
547 	/* Shift the word right by 8 bits to prepare the next byte for extraction */
548 	xspi->current_rx_fifo_word >>= 8;
549 	xspi->bytes_left_in_current_rx_word--;
550 
551 	return byte_val;
552 }
553 
/**
 * ax_spi_mem_exec_op - Execute a spi-mem operation by polling the FIFOs
 * @mem:	spi-mem device handle
 * @op:		Operation descriptor (cmd/addr/dummy/data phases)
 *
 * Builds the command and address bytes, pushes them (plus dummy or data
 * bytes) into the TX FIFO, starts the transfer and polls the FIFO byte
 * counters until completion.  Only 1-bit bus width is supported on all
 * phases.
 *
 * Return:	0 on success, -EOPNOTSUPP for unsupported bus widths,
 *		-ETIMEDOUT when the FIFO counters do not progress in time.
 */
static int ax_spi_mem_exec_op(struct spi_mem *mem, const struct spi_mem_op *op)
{
	struct spi_device *spi = mem->spi;
	struct ax_spi *xspi = spi_controller_get_devdata(spi->controller);
	u32 reg_val;
	int ret = 0;
	u8 cmd_buf[AX_SPI_COMMAND_BUFFER_SIZE];
	int cmd_len = 0;
	int i = 0, timeout = AX_SPI_TRX_FIFO_TIMEOUT;
	int bytes_to_discard_from_rx;
	u8 *rx_buf_ptr = (u8 *)op->data.buf.in;
	u8 *tx_buf_ptr = (u8 *)op->data.buf.out;
	u32 rx_count_reg = 0;

	dev_dbg(&spi->dev,
		"%s: cmd:%02x mode:%d.%d.%d.%d addr:%llx len:%d\n",
		__func__, op->cmd.opcode, op->cmd.buswidth, op->addr.buswidth,
		op->dummy.buswidth, op->data.buswidth, op->addr.val,
		op->data.nbytes);

	/* Validate operation parameters: Only 1-bit bus width supported */
	if (op->cmd.buswidth != 1 ||
	    (op->addr.nbytes && op->addr.buswidth != 0 &&
	    op->addr.buswidth != 1) ||
	    (op->dummy.nbytes && op->dummy.buswidth != 0 &&
	    op->dummy.buswidth != 1) ||
	    (op->data.nbytes && op->data.buswidth != 1)) {
		dev_err(&spi->dev, "Unsupported bus width, only 1-bit bus width supported\n");
		return -EOPNOTSUPP;
	}

	/* Initialize controller hardware */
	ax_spi_init_hw(xspi);

	/* Assert chip select (pull low) */
	ax_spi_chipselect(spi, false);

	/* Build command phase: 2-byte opcodes are sent MSB first */
	if (op->cmd.nbytes == 2) {
		cmd_buf[cmd_len++] = (op->cmd.opcode >> 8) & 0xFF;
		cmd_buf[cmd_len++] = op->cmd.opcode & 0xFF;
	} else {
		cmd_buf[cmd_len++] = op->cmd.opcode;
	}

	/* Append address bytes, most significant byte first */
	if (op->addr.nbytes) {
		for (i = op->addr.nbytes - 1; i >= 0; i--) {
			cmd_buf[cmd_len] = (op->addr.val >> (i * 8)) & 0xFF;
			cmd_len++;
		}
	}

	/* Configure controller for desired operation mode (write/read) */
	reg_val = ax_spi_read(xspi, AX_SPI_CR2);
	reg_val |= AX_SPI_CR2_SWD | AX_SPI_CR2_SRI | AX_SPI_CR2_SRD;
	ax_spi_write(xspi, AX_SPI_CR2, reg_val);

	/* Write command and address bytes to TX_FIFO */
	for (i = 0; i < cmd_len; i++)
		ax_spi_write_b(xspi, AX_SPI_TXFIFO, cmd_buf[i]);

	/* Add dummy bytes (for clock generation) or actual data bytes to TX_FIFO */
	if (op->data.dir == SPI_MEM_DATA_IN) {
		for (i = 0; i < op->dummy.nbytes; i++)
			ax_spi_write_b(xspi, AX_SPI_TXFIFO, 0x00);
		for (i = 0; i < op->data.nbytes; i++)
			ax_spi_write_b(xspi, AX_SPI_TXFIFO, 0x00);
	} else {
		for (i = 0; i < op->data.nbytes; i++)
			ax_spi_write_b(xspi, AX_SPI_TXFIFO, tx_buf_ptr[i]);
	}

	/* Start the SPI transmission */
	reg_val = ax_spi_read(xspi, AX_SPI_CR2);
	reg_val |= AX_SPI_CR2_HTE;
	ax_spi_write(xspi, AX_SPI_CR2, reg_val);

	/*
	 * Wait for the TX FIFO to become empty.
	 * NOTE(review): unlike the loops below, this loop does not return
	 * -ETIMEDOUT when the FIFO never empties; execution falls through
	 * regardless — confirm whether a timeout error was intended here.
	 */
	while (timeout-- > 0) {
		u32 tx_count_reg = ax_spi_read(xspi, AX_SPI_TX_FBCAR);

		if (tx_count_reg == 0) {
			udelay(1);
			break;
		}
		udelay(1);
	}

	/* Handle Data Reception (for read operations) */
	if (op->data.dir == SPI_MEM_DATA_IN) {
		/* Reset the internal RX byte buffer for this new operation.
		 * This ensures ax_spi_get_rx_byte starts fresh for each exec_op call.
		 */
		xspi->bytes_left_in_current_rx_word = 0;
		xspi->current_rx_fifo_word = 0;

		/* Poll until all expected bytes have landed in the RX FIFO */
		timeout = AX_SPI_TRX_FIFO_TIMEOUT;
		while (timeout-- > 0) {
			rx_count_reg = ax_spi_read(xspi, AX_SPI_RX_FBCAR);
			if (rx_count_reg >= op->data.nbytes)
				break;
			udelay(1); /* Small delay to prevent aggressive busy-waiting */
		}

		if (timeout < 0) {
			ret = -ETIMEDOUT;
			goto out_unlock;
		}

		/* Calculate how many bytes we need to discard from the RX FIFO.
		 * Since we set SRI, we only need to discard the address bytes and
		 * dummy bytes from the RX FIFO.
		 */
		bytes_to_discard_from_rx = op->addr.nbytes + op->dummy.nbytes;
		for (i = 0; i < bytes_to_discard_from_rx; i++)
			ax_spi_get_rx_byte(xspi);

		/* Read actual data bytes into op->data.buf.in */
		for (i = 0; i < op->data.nbytes; i++) {
			*rx_buf_ptr = ax_spi_get_rx_byte(xspi);
			rx_buf_ptr++;
		}
	} else if (op->data.dir == SPI_MEM_DATA_OUT) {
		/* Wait for the TX FIFO to fully drain before releasing CS */
		timeout = AX_SPI_TRX_FIFO_TIMEOUT;
		while (timeout-- > 0) {
			u32 tx_fifo_level = ax_spi_read(xspi, AX_SPI_TX_FBCAR);

			if (tx_fifo_level == 0)
				break;
			udelay(1);
		}
		if (timeout < 0) {
			ret = -ETIMEDOUT;
			goto out_unlock;
		}
	}

out_unlock:
	/* Deassert chip select (pull high) */
	ax_spi_chipselect(spi, true);

	return ret;
}
698 
699 static int ax_spi_mem_adjust_op_size(struct spi_mem *mem, struct spi_mem_op *op)
700 {
701 	struct spi_device *spi = mem->spi;
702 	struct ax_spi *xspi = spi_controller_get_devdata(spi->controller);
703 	size_t max_transfer_payload_bytes;
704 	size_t fifo_total_bytes;
705 	size_t protocol_overhead_bytes;
706 
707 	fifo_total_bytes = xspi->tx_fifo_depth;
708 	/* Calculate protocol overhead bytes according to the real operation each time. */
709 	protocol_overhead_bytes = op->cmd.nbytes + op->addr.nbytes + op->dummy.nbytes;
710 
711 	/* Calculate the maximum data payload that can fit into the FIFO. */
712 	if (fifo_total_bytes <= protocol_overhead_bytes) {
713 		max_transfer_payload_bytes = 0;
714 		dev_warn_once(&spi->dev, "SPI FIFO (%zu bytes) is too small for protocol overhead (%zu bytes)! Max data size forced to 0.\n",
715 			 fifo_total_bytes, protocol_overhead_bytes);
716 	} else {
717 		max_transfer_payload_bytes = fifo_total_bytes - protocol_overhead_bytes;
718 	}
719 
720 	/* Limit op->data.nbytes based on the calculated max payload and SZ_64K.
721 	 * This is the value that spi-mem will then use to split requests.
722 	 */
723 	if (op->data.nbytes > max_transfer_payload_bytes) {
724 		op->data.nbytes = max_transfer_payload_bytes;
725 		dev_dbg(&spi->dev, "%s %d: op->data.nbytes adjusted to %u due to FIFO overhead\n",
726 			__func__, __LINE__, op->data.nbytes);
727 	}
728 
729 	/* Also apply the overall max transfer size */
730 	if (op->data.nbytes > SZ_64K) {
731 		op->data.nbytes = SZ_64K;
732 		dev_dbg(&spi->dev, "%s %d: op->data.nbytes adjusted to %u due to SZ_64K limit\n",
733 			__func__, __LINE__, op->data.nbytes);
734 	}
735 
736 	return 0;
737 }
738 
/* spi-mem callbacks: opcode-based operations (e.g. SPI flash access) */
static const struct spi_controller_mem_ops ax_spi_mem_ops = {
	.exec_op = ax_spi_mem_exec_op,
	.adjust_op_size = ax_spi_mem_adjust_op_size,
};
743 
744 /**
745  * ax_spi_probe - Probe method for the SPI driver
746  * @pdev:	Pointer to the platform_device structure
747  *
748  * This function initializes the driver data structures and the hardware.
749  *
750  * Return:	0 on success and error value on error
751  */
752 static int ax_spi_probe(struct platform_device *pdev)
753 {
754 	int ret = 0, irq;
755 	struct spi_controller *ctlr;
756 	struct ax_spi *xspi;
757 	u32 num_cs;
758 
759 	ctlr = devm_spi_alloc_host(&pdev->dev, sizeof(*xspi));
760 	if (!ctlr)
761 		return -ENOMEM;
762 
763 	xspi = spi_controller_get_devdata(ctlr);
764 	ctlr->dev.of_node = pdev->dev.of_node;
765 	platform_set_drvdata(pdev, ctlr);
766 
767 	xspi->regs = devm_platform_ioremap_resource(pdev, 0);
768 	if (IS_ERR(xspi->regs)) {
769 		ret = PTR_ERR(xspi->regs);
770 		goto remove_ctlr;
771 	}
772 
773 	xspi->pclk = devm_clk_get(&pdev->dev, "pclk");
774 	if (IS_ERR(xspi->pclk)) {
775 		dev_err(&pdev->dev, "pclk clock not found.\n");
776 		ret = PTR_ERR(xspi->pclk);
777 		goto remove_ctlr;
778 	}
779 
780 	xspi->ref_clk = devm_clk_get(&pdev->dev, "ref");
781 	if (IS_ERR(xspi->ref_clk)) {
782 		dev_err(&pdev->dev, "ref clock not found.\n");
783 		ret = PTR_ERR(xspi->ref_clk);
784 		goto remove_ctlr;
785 	}
786 
787 	ret = clk_prepare_enable(xspi->pclk);
788 	if (ret) {
789 		dev_err(&pdev->dev, "Unable to enable APB clock.\n");
790 		goto remove_ctlr;
791 	}
792 
793 	ret = clk_prepare_enable(xspi->ref_clk);
794 	if (ret) {
795 		dev_err(&pdev->dev, "Unable to enable device clock.\n");
796 		goto clk_dis_apb;
797 	}
798 
799 	pm_runtime_use_autosuspend(&pdev->dev);
800 	pm_runtime_set_autosuspend_delay(&pdev->dev, SPI_AUTOSUSPEND_TIMEOUT);
801 	pm_runtime_get_noresume(&pdev->dev);
802 	pm_runtime_set_active(&pdev->dev);
803 	pm_runtime_enable(&pdev->dev);
804 
805 	ret = of_property_read_u32(pdev->dev.of_node, "num-cs", &num_cs);
806 	if (ret < 0)
807 		ctlr->num_chipselect = AX_SPI_DEFAULT_NUM_CS;
808 	else
809 		ctlr->num_chipselect = num_cs;
810 
811 	ax_spi_detect_fifo_depth(xspi);
812 
813 	xspi->current_rx_fifo_word = 0;
814 	xspi->bytes_left_in_current_rx_word = 0;
815 
816 	/* Initialize IRQ-related variables */
817 	xspi->bytes_left_in_current_rx_word_for_irq = 0;
818 	xspi->current_rx_fifo_word_for_irq = 0;
819 
820 	/* SPI controller initializations */
821 	ax_spi_init_hw(xspi);
822 
823 	irq = platform_get_irq(pdev, 0);
824 	if (irq <= 0) {
825 		ret = -ENXIO;
826 		goto clk_dis_all;
827 	}
828 
829 	ret = devm_request_irq(&pdev->dev, irq, ax_spi_irq,
830 			       0, pdev->name, ctlr);
831 	if (ret != 0) {
832 		ret = -ENXIO;
833 		dev_err(&pdev->dev, "request_irq failed\n");
834 		goto clk_dis_all;
835 	}
836 
837 	ctlr->use_gpio_descriptors = true;
838 	ctlr->prepare_transfer_hardware = ax_prepare_transfer_hardware;
839 	ctlr->prepare_message = ax_prepare_message;
840 	ctlr->transfer_one = ax_transfer_one;
841 	ctlr->unprepare_transfer_hardware = ax_unprepare_transfer_hardware;
842 	ctlr->set_cs = ax_spi_chipselect;
843 	ctlr->auto_runtime_pm = true;
844 	ctlr->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH;
845 
846 	xspi->clk_rate = clk_get_rate(xspi->ref_clk);
847 	/* Set to default valid value */
848 	ctlr->max_speed_hz = xspi->clk_rate / 2;
849 	xspi->speed_hz = ctlr->max_speed_hz;
850 
851 	ctlr->bits_per_word_mask = SPI_BPW_MASK(8);
852 
853 	pm_runtime_mark_last_busy(&pdev->dev);
854 	pm_runtime_put_autosuspend(&pdev->dev);
855 
856 	ctlr->mem_ops = &ax_spi_mem_ops;
857 
858 	ret = spi_register_controller(ctlr);
859 	if (ret) {
860 		dev_err(&pdev->dev, "spi_register_controller failed\n");
861 		goto clk_dis_all;
862 	}
863 
864 	return ret;
865 
866 clk_dis_all:
867 	pm_runtime_set_suspended(&pdev->dev);
868 	pm_runtime_disable(&pdev->dev);
869 	clk_disable_unprepare(xspi->ref_clk);
870 clk_dis_apb:
871 	clk_disable_unprepare(xspi->pclk);
872 remove_ctlr:
873 	spi_controller_put(ctlr);
874 	return ret;
875 }
876 
/**
 * ax_spi_remove - Remove method for the SPI driver
 * @pdev:	Pointer to the platform_device structure
 *
 * This function is called if a device is physically removed from the system or
 * if the driver module is being unloaded. It frees all resources allocated to
 * the device.
 */
static void ax_spi_remove(struct platform_device *pdev)
{
	struct spi_controller *ctlr = platform_get_drvdata(pdev);
	struct ax_spi *xspi = spi_controller_get_devdata(ctlr);

	/* Detach from the SPI core before tearing anything down */
	spi_unregister_controller(ctlr);

	/* Wind down runtime PM before gating the clocks */
	pm_runtime_set_suspended(&pdev->dev);
	pm_runtime_disable(&pdev->dev);

	clk_disable_unprepare(xspi->ref_clk);
	clk_disable_unprepare(xspi->pclk);
}
898 
899 /**
900  * ax_spi_suspend - Suspend method for the SPI driver
901  * @dev:	Address of the platform_device structure
902  *
903  * This function disables the SPI controller and
904  * changes the driver state to "suspend"
905  *
906  * Return:	0 on success and error value on error
907  */
908 static int __maybe_unused ax_spi_suspend(struct device *dev)
909 {
910 	struct spi_controller *ctlr = dev_get_drvdata(dev);
911 
912 	return spi_controller_suspend(ctlr);
913 }
914 
915 /**
916  * ax_spi_resume - Resume method for the SPI driver
917  * @dev:	Address of the platform_device structure
918  *
919  * This function changes the driver state to "ready"
920  *
921  * Return:	0 on success and error value on error
922  */
923 static int __maybe_unused ax_spi_resume(struct device *dev)
924 {
925 	struct spi_controller *ctlr = dev_get_drvdata(dev);
926 	struct ax_spi *xspi = spi_controller_get_devdata(ctlr);
927 
928 	ax_spi_init_hw(xspi);
929 	return spi_controller_resume(ctlr);
930 }
931 
932 /**
933  * ax_spi_runtime_resume - Runtime resume method for the SPI driver
934  * @dev:	Address of the platform_device structure
935  *
936  * This function enables the clocks
937  *
938  * Return:	0 on success and error value on error
939  */
940 static int __maybe_unused ax_spi_runtime_resume(struct device *dev)
941 {
942 	struct spi_controller *ctlr = dev_get_drvdata(dev);
943 	struct ax_spi *xspi = spi_controller_get_devdata(ctlr);
944 	int ret;
945 
946 	ret = clk_prepare_enable(xspi->pclk);
947 	if (ret) {
948 		dev_err(dev, "Cannot enable APB clock.\n");
949 		return ret;
950 	}
951 
952 	ret = clk_prepare_enable(xspi->ref_clk);
953 	if (ret) {
954 		dev_err(dev, "Cannot enable device clock.\n");
955 		clk_disable_unprepare(xspi->pclk);
956 		return ret;
957 	}
958 	return 0;
959 }
960 
961 /**
962  * ax_spi_runtime_suspend - Runtime suspend method for the SPI driver
963  * @dev:	Address of the platform_device structure
964  *
965  * This function disables the clocks
966  *
967  * Return:	Always 0
968  */
969 static int __maybe_unused ax_spi_runtime_suspend(struct device *dev)
970 {
971 	struct spi_controller *ctlr = dev_get_drvdata(dev);
972 	struct ax_spi *xspi = spi_controller_get_devdata(ctlr);
973 
974 	clk_disable_unprepare(xspi->ref_clk);
975 	clk_disable_unprepare(xspi->pclk);
976 
977 	return 0;
978 }
979 
/* Runtime-PM and system-sleep callbacks */
static const struct dev_pm_ops ax_spi_dev_pm_ops = {
	SET_RUNTIME_PM_OPS(ax_spi_runtime_suspend,
			   ax_spi_runtime_resume, NULL)
	SET_SYSTEM_SLEEP_PM_OPS(ax_spi_suspend, ax_spi_resume)
};
985 
/* Device-tree match table */
static const struct of_device_id ax_spi_of_match[] = {
	{ .compatible = "axiado,ax3000-spi" },
	{ /* end of table */ }
};
MODULE_DEVICE_TABLE(of, ax_spi_of_match);
991 
/* ax_spi_driver - This structure defines the SPI subsystem platform driver */
static struct platform_driver ax_spi_driver = {
	.probe	= ax_spi_probe,
	.remove	= ax_spi_remove,
	.driver = {
		.name = AX_SPI_NAME,
		.of_match_table = ax_spi_of_match,
		.pm = &ax_spi_dev_pm_ops,
	},
};

module_platform_driver(ax_spi_driver);

MODULE_AUTHOR("Axiado Corporation");
MODULE_DESCRIPTION("Axiado SPI Host driver");
MODULE_LICENSE("GPL");
1008