/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2021, Adrian Chadd <adrian@FreeBSD.org>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/param.h>
#include <sys/systm.h>

#include <sys/bus.h>
#include <sys/interrupt.h>
#include <sys/malloc.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/kernel.h>
#include <sys/module.h>
#include <sys/rman.h>

#include <vm/vm.h>
#include <vm/pmap.h>
#include <vm/vm_extern.h>

#include <machine/bus.h>
#include <machine/cpu.h>

#include <dev/gpio/gpiobusvar.h>
#include <dev/ofw/ofw_bus.h>
#include <dev/ofw/ofw_bus_subr.h>

#include <dev/clk/clk.h>
#include <dev/hwreset/hwreset.h>

#include <dev/spibus/spi.h>
#include <dev/spibus/spibusvar.h>
#include "spibus_if.h"

#include <dev/qcom_qup/qcom_spi_var.h>
#include <dev/qcom_qup/qcom_spi_reg.h>
#include <dev/qcom_qup/qcom_qup_reg.h>
#include <dev/qcom_qup/qcom_spi_debug.h>

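/*
 * Read the hardware input/output block and FIFO sizes from
 * QUP_IO_M_MODES and cache them in the softc configuration.
 */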
int
qcom_spi_hw_read_controller_transfer_sizes(struct qcom_spi_softc *sc)
{
	uint32_t reg, val;

	reg = QCOM_SPI_READ_4(sc, QUP_IO_M_MODES);

	QCOM_SPI_DPRINTF(sc, QCOM_SPI_DEBUG_HW_TRANSFER_SETUP,
	    "%s: QUP_IO_M_MODES=0x%08x\n", __func__, reg);

	/* Input block size */
	val = (reg >> QUP_IO_M_INPUT_BLOCK_SIZE_SHIFT)
	    & QUP_IO_M_INPUT_BLOCK_SIZE_MASK;
	if (val == 0)
		sc->config.input_block_size = 4;
	else
		sc->config.input_block_size = val * 16;

	/* Output block size */
	val = (reg >> QUP_IO_M_OUTPUT_BLOCK_SIZE_SHIFT)
	    & QUP_IO_M_OUTPUT_BLOCK_SIZE_MASK;
	if (val == 0)
		sc->config.output_block_size = 4;
	else
		sc->config.output_block_size = val * 16;

	/* Input FIFO size */
	val = (reg >> QUP_IO_M_INPUT_FIFO_SIZE_SHIFT)
	    & QUP_IO_M_INPUT_FIFO_SIZE_MASK;
	sc->config.input_fifo_size =
	    sc->config.input_block_size * (2 << val);

	/* Output FIFO size */
	val = (reg >> QUP_IO_M_OUTPUT_FIFO_SIZE_SHIFT)
	    & QUP_IO_M_OUTPUT_FIFO_SIZE_MASK;
	sc->config.output_fifo_size =
	    sc->config.output_block_size * (2 << val);

	return (0);
}

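/*
 * Return whether the QUP state machine is currently reporting
 * a valid state (QUP_STATE_VALID in QUP_STATE).
 */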
static bool
qcom_spi_hw_qup_is_state_valid_locked(struct qcom_spi_softc *sc)
{
	uint32_t reg;

	QCOM_SPI_ASSERT_LOCKED(sc);

	reg = QCOM_SPI_READ_4(sc, QUP_STATE);
	QCOM_SPI_BARRIER_READ(sc);

	return (!!(reg & QUP_STATE_VALID));
}

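/*
 * Poll until the QUP state machine reports a valid state.
 *
 * Returns 0 if the state became valid, ENXIO if we timed out.
 */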
static int
qcom_spi_hw_qup_wait_state_valid_locked(struct qcom_spi_softc *sc)
{
	int i;

	for (i = 0; i < 10; i++) {
		if (qcom_spi_hw_qup_is_state_valid_locked(sc))
			break;
	}
	if (i >= 10) {
		device_printf(sc->sc_dev,
		    "ERROR: timeout waiting for valid state\n");
		return (ENXIO);
	}
	return (0);
}

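/*
 * Return whether the configured transfer mode is one of the DMA
 * modes (data mover or BAM) rather than a PIO (FIFO/block) mode.
 */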
static bool
qcom_spi_hw_is_opmode_dma_locked(struct qcom_spi_softc *sc)
{

	QCOM_SPI_ASSERT_LOCKED(sc);

	if (sc->state.transfer_mode == QUP_IO_M_MODE_DMOV)
		return (true);
	if (sc->state.transfer_mode == QUP_IO_M_MODE_BAM)
		return (true);
	return (false);
}

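/*
 * Transition the QUP state machine to the given state, waiting
 * for a valid state before and after the transition.
 */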
int
qcom_spi_hw_qup_set_state_locked(struct qcom_spi_softc *sc, uint32_t state)
{
	uint32_t cur_state;
	int ret;

	QCOM_SPI_ASSERT_LOCKED(sc);

	/* Wait until the state becomes valid */
	ret = qcom_spi_hw_qup_wait_state_valid_locked(sc);
	if (ret != 0) {
		return (ret);
	}

	cur_state = QCOM_SPI_READ_4(sc, QUP_STATE);

	QCOM_SPI_DPRINTF(sc, QCOM_SPI_DEBUG_HW_STATE_CHANGE,
	    "%s: target state=%d, cur_state=0x%08x\n",
	    __func__, state, cur_state);

	/*
	 * According to the QUP specification, when going
	 * from PAUSE to RESET, two writes are required.
	 */
	if ((state == QUP_STATE_RESET)
	    && ((cur_state & QUP_STATE_MASK) == QUP_STATE_PAUSE)) {
		QCOM_SPI_WRITE_4(sc, QUP_STATE, QUP_STATE_CLEAR);
		QCOM_SPI_BARRIER_WRITE(sc);
		QCOM_SPI_WRITE_4(sc, QUP_STATE, QUP_STATE_CLEAR);
		QCOM_SPI_BARRIER_WRITE(sc);
	} else {
		cur_state &= ~QUP_STATE_MASK;
		cur_state |= state;
		QCOM_SPI_WRITE_4(sc, QUP_STATE, cur_state);
		QCOM_SPI_BARRIER_WRITE(sc);
	}

	/* Wait until the state becomes valid */
	ret = qcom_spi_hw_qup_wait_state_valid_locked(sc);
	if (ret != 0) {
		return (ret);
	}

	cur_state = QCOM_SPI_READ_4(sc, QUP_STATE);

	QCOM_SPI_DPRINTF(sc, QCOM_SPI_DEBUG_HW_STATE_CHANGE,
	    "%s: FINISH: target state=%d, cur_state=0x%08x\n",
	    __func__, state, cur_state);

	return (0);
}

/*
 * Do initial QUP setup.
 *
 * This is initially for the SPI driver; it would be interesting to see how
 * much of this is the same with the I2C/HSUART paths.
 */
int
qcom_spi_hw_qup_init_locked(struct qcom_spi_softc *sc)
{
	int ret;

	QCOM_SPI_ASSERT_LOCKED(sc);

	/* Full hardware reset */
	(void) qcom_spi_hw_do_full_reset(sc);

	ret = qcom_spi_hw_qup_set_state_locked(sc, QUP_STATE_RESET);
	if (ret != 0) {
		device_printf(sc->sc_dev, "ERROR: %s: couldn't reset\n",
		    __func__);
		goto error;
	}

	QCOM_SPI_WRITE_4(sc, QUP_OPERATIONAL, 0);
	QCOM_SPI_WRITE_4(sc, QUP_IO_M_MODES, 0);
	/* Note: no QUP_OPERATIONAL_MASK in QUP v1 */
	if (! QCOM_SPI_QUP_VERSION_V1(sc))
		QCOM_SPI_WRITE_4(sc, QUP_OPERATIONAL_MASK, 0);

	/*
	 * On QUP v1, explicitly enable the error flags we care about;
	 * input overrun is deliberately left masked.
	 */
	if (QCOM_SPI_QUP_VERSION_V1(sc))
		QCOM_SPI_WRITE_4(sc, QUP_ERROR_FLAGS_EN,
		    QUP_ERROR_OUTPUT_OVER_RUN
		    | QUP_ERROR_INPUT_UNDER_RUN
		    | QUP_ERROR_OUTPUT_UNDER_RUN);
	QCOM_SPI_BARRIER_WRITE(sc);

	return (0);
error:
	return (ret);
}

/*
 * Do initial SPI setup.
 */
int
qcom_spi_hw_spi_init_locked(struct qcom_spi_softc *sc)
{

	QCOM_SPI_ASSERT_LOCKED(sc);

	/* Initial SPI error flags */
	QCOM_SPI_WRITE_4(sc, SPI_ERROR_FLAGS_EN,
	    QUP_ERROR_INPUT_UNDER_RUN
	    | QUP_ERROR_OUTPUT_UNDER_RUN);
	QCOM_SPI_BARRIER_WRITE(sc);

	/* Initial SPI config */
	QCOM_SPI_WRITE_4(sc, SPI_CONFIG, 0);
	QCOM_SPI_BARRIER_WRITE(sc);

	/* Initial CS/tri-state io control config */
	QCOM_SPI_WRITE_4(sc, SPI_IO_CONTROL,
	    SPI_IO_C_NO_TRI_STATE
	    | SPI_IO_C_CS_SELECT(sc->config.cs_select));
	QCOM_SPI_BARRIER_WRITE(sc);

	return (0);
}

/*
 * Force the currently selected device CS line to be active
 * or inactive.
 *
 * This forces it to be active or inactive rather than letting
 * the SPI transfer machine do its thing.  If you want to be able
 * to break up a big transaction into a handful of smaller ones,
 * without toggling /CS_n for that device, then you need it forced.
 * (If you toggle the /CS_n to the device to inactive then active,
 * NOR/NAND devices tend to stop a block transfer.)
 */
int
qcom_spi_hw_spi_cs_force(struct qcom_spi_softc *sc, int cs, bool enable)
{
	uint32_t reg;

	QCOM_SPI_ASSERT_LOCKED(sc);

	QCOM_SPI_DPRINTF(sc, QCOM_SPI_DEBUG_HW_CHIPSELECT,
	    "%s: called, enable=%u\n",
	    __func__, enable);

	reg = QCOM_SPI_READ_4(sc, SPI_IO_CONTROL);
	if (enable)
		reg |= SPI_IO_C_FORCE_CS;
	else
		reg &= ~SPI_IO_C_FORCE_CS;
	reg &= ~SPI_IO_C_CS_SELECT_MASK;
	reg |= SPI_IO_C_CS_SELECT(cs);
	QCOM_SPI_WRITE_4(sc, SPI_IO_CONTROL, reg);
	QCOM_SPI_BARRIER_WRITE(sc);

	return (0);
}

/*
 * ACK/store current interrupt flag state.
 */
int
qcom_spi_hw_interrupt_handle(struct qcom_spi_softc *sc)
{
	uint32_t qup_error, spi_error, op_flags;

	QCOM_SPI_ASSERT_LOCKED(sc);

	/* Get QUP/SPI state */
	qup_error = QCOM_SPI_READ_4(sc, QUP_ERROR_FLAGS);
	spi_error = QCOM_SPI_READ_4(sc, SPI_ERROR_FLAGS);
	op_flags = QCOM_SPI_READ_4(sc, QUP_OPERATIONAL);

	/* ACK state */
	QCOM_SPI_WRITE_4(sc, QUP_ERROR_FLAGS, qup_error);
	QCOM_SPI_WRITE_4(sc, SPI_ERROR_FLAGS, spi_error);

	QCOM_SPI_DPRINTF(sc, QCOM_SPI_DEBUG_HW_INTR,
	    "%s: called; qup=0x%08x, spi=0x%08x, op=0x%08x\n",
	    __func__,
	    qup_error,
	    spi_error,
	    op_flags);

	/* handle error flags */
	if (qup_error != 0) {
		device_printf(sc->sc_dev, "ERROR: (QUP) mask=0x%08x\n",
		    qup_error);
		sc->intr.error = true;
	}
	if (spi_error != 0) {
		device_printf(sc->sc_dev, "ERROR: (SPI) mask=0x%08x\n",
		    spi_error);
		sc->intr.error = true;
	}

	/* handle operational state */
	if (qcom_spi_hw_is_opmode_dma_locked(sc)) {
		/* ACK interrupts now */
		QCOM_SPI_WRITE_4(sc, QUP_OPERATIONAL, op_flags);
		if ((op_flags & QUP_OP_IN_SERVICE_FLAG)
		    && (op_flags & QUP_OP_MAX_INPUT_DONE_FLAG))
			sc->intr.rx_dma_done = true;
		if ((op_flags & QUP_OP_OUT_SERVICE_FLAG)
		    && (op_flags & QUP_OP_MAX_OUTPUT_DONE_FLAG))
			sc->intr.tx_dma_done = true;
	} else {
		/* FIFO/Block */
		if (op_flags & QUP_OP_IN_SERVICE_FLAG)
			sc->intr.do_rx = true;
		if (op_flags & QUP_OP_OUT_SERVICE_FLAG)
			sc->intr.do_tx = true;
	}

	/* Check if we've finished transfers */
	if (op_flags & QUP_OP_MAX_INPUT_DONE_FLAG)
		sc->intr.done = true;
	if (sc->intr.error)
		sc->intr.done = true;

	return (0);
}

/*
 * Make initial transfer selections based on the transfer sizes
 * and alignment.
 *
 * For now this just defaults to FIFO; once that works it will
 * grow to include BLOCK / DMA as appropriate.
 */
int
qcom_spi_hw_setup_transfer_selection(struct qcom_spi_softc *sc, uint32_t len)
{

	QCOM_SPI_ASSERT_LOCKED(sc);

	/*
	 * For now only support doing a single FIFO transfer.
	 * The main PIO transfer routine loop will break it up for us.
	 */
	sc->state.transfer_mode = QUP_IO_M_MODE_FIFO;
	sc->transfer.tx_offset = 0;
	sc->transfer.rx_offset = 0;
	sc->transfer.tx_len = 0;
	sc->transfer.rx_len = 0;
	sc->transfer.tx_buf = NULL;
	sc->transfer.rx_buf = NULL;

	/*
	 * If we're sending a DWORD-multiple sized block (like IO
	 * buffers) then we can just use DWORD sized transfers.
	 *
	 * This is really only valid for PIO/block modes; I'm not yet
	 * sure what we should do for DMA modes.
	 */
	if (len > 0 && len % 4 == 0)
		sc->state.transfer_word_size = 4;
	else
		sc->state.transfer_word_size = 1;

	return (0);
}

/*
 * Blank the transfer state after a full transfer is completed.
 */
int
qcom_spi_hw_complete_transfer(struct qcom_spi_softc *sc)
{
	QCOM_SPI_ASSERT_LOCKED(sc);

	sc->state.transfer_mode = QUP_IO_M_MODE_FIFO;
	sc->transfer.tx_offset = 0;
	sc->transfer.rx_offset = 0;
	sc->transfer.tx_len = 0;
	sc->transfer.rx_len = 0;
	sc->transfer.tx_buf = NULL;
	sc->transfer.rx_buf = NULL;
	sc->state.transfer_word_size = 0;
	return (0);
}

/*
 * Configure up the transfer selection for the current transfer.
 *
 * This calculates how many words we can transfer in the current
 * transfer and what's left to transfer.
 */
int
qcom_spi_hw_setup_current_transfer(struct qcom_spi_softc *sc)
{
	uint32_t bytes_left;

	QCOM_SPI_ASSERT_LOCKED(sc);

	/*
	 * XXX For now, base this on the TX side buffer size, not both.
	 * Later on we'll want to configure it based on the MAX of
	 * either and just eat up the dummy values in the PIO
	 * routines.  (For DMA it's .. more annoyingly complicated
	 * if the transfer sizes are not symmetrical.)
	 */
	bytes_left = sc->transfer.tx_len - sc->transfer.tx_offset;

	if (sc->state.transfer_mode == QUP_IO_M_MODE_FIFO) {
		/*
		 * For FIFO transfers the num_words limit depends upon
		 * the word size, FIFO size and how many bytes are left.
		 * It definitely will be under SPI_MAX_XFER so don't
		 * worry about that here.
		 */
		sc->transfer.num_words =
		    bytes_left / sc->state.transfer_word_size;
		sc->transfer.num_words = MIN(sc->transfer.num_words,
		    sc->config.input_fifo_size / sizeof(uint32_t));
	} else if (sc->state.transfer_mode == QUP_IO_M_MODE_BLOCK) {
		/*
		 * For BLOCK transfers the logic will be a little different.
		 * Instead of it being based on the maximum input_fifo_size,
		 * it'll be broken down into the "words per block" size but
		 * our maximum transfer size will ACTUALLY be capped by
		 * SPI_MAX_XFER (65536-64 bytes).  Each transfer
		 * will end up being in multiples of a block until the
		 * last transfer.
		 */
		sc->transfer.num_words =
		    bytes_left / sc->state.transfer_word_size;
		sc->transfer.num_words = MIN(sc->transfer.num_words,
		    SPI_MAX_XFER);
	}

	QCOM_SPI_DPRINTF(sc, QCOM_SPI_DEBUG_HW_TRANSFER_SETUP,
	    "%s: transfer.tx_len=%u,"
	    " transfer.tx_offset=%u,"
	    " transfer_word_size=%u,"
	    " bytes_left=%u, num_words=%u, fifo_word_max=%u\n",
	    __func__,
	    sc->transfer.tx_len,
	    sc->transfer.tx_offset,
	    sc->state.transfer_word_size,
	    bytes_left,
	    sc->transfer.num_words,
	    sc->config.input_fifo_size / sizeof(uint32_t));

	return (0);
}

/*
 * Setup the PIO FIFO transfer count.
 *
 * Note that we get a /single/ TX/RX phase up to these num_words
 * transfers.
 */
int
qcom_spi_hw_setup_pio_transfer_cnt(struct qcom_spi_softc *sc)
{

	QCOM_SPI_ASSERT_LOCKED(sc);

	QCOM_SPI_WRITE_4(sc, QUP_MX_READ_CNT, sc->transfer.num_words);
	QCOM_SPI_WRITE_4(sc, QUP_MX_WRITE_CNT, sc->transfer.num_words);
	QCOM_SPI_WRITE_4(sc, QUP_MX_INPUT_CNT, 0);
	QCOM_SPI_WRITE_4(sc, QUP_MX_OUTPUT_CNT, 0);

	QCOM_SPI_DPRINTF(sc, QCOM_SPI_DEBUG_HW_TRANSFER_SETUP,
	    "%s: num_words=%u\n", __func__,
	    sc->transfer.num_words);

	QCOM_SPI_BARRIER_WRITE(sc);

	return (0);
}

/*
 * Setup the PIO BLOCK transfer count.
 *
 * This sets up the total transfer size, in TX/RX FIFO block size
 * chunks.  We will get multiple notifications when a block sized
 * chunk of data is available or required.
 */
int
qcom_spi_hw_setup_block_transfer_cnt(struct qcom_spi_softc *sc)
{

	QCOM_SPI_ASSERT_LOCKED(sc);

	QCOM_SPI_WRITE_4(sc, QUP_MX_READ_CNT, 0);
	QCOM_SPI_WRITE_4(sc, QUP_MX_WRITE_CNT, 0);
	QCOM_SPI_WRITE_4(sc, QUP_MX_INPUT_CNT, sc->transfer.num_words);
	QCOM_SPI_WRITE_4(sc, QUP_MX_OUTPUT_CNT, sc->transfer.num_words);
	QCOM_SPI_BARRIER_WRITE(sc);

	return (0);
}

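/*
 * Program the input/output transfer modes into QUP_IO_M_MODES,
 * enabling hardware pack/unpack only for the DMA modes.
 */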
int
qcom_spi_hw_setup_io_modes(struct qcom_spi_softc *sc)
{
	uint32_t reg;

	QCOM_SPI_ASSERT_LOCKED(sc);

	reg = QCOM_SPI_READ_4(sc, QUP_IO_M_MODES);

	reg &= ~((QUP_IO_M_INPUT_MODE_MASK << QUP_IO_M_INPUT_MODE_SHIFT)
	    | (QUP_IO_M_OUTPUT_MODE_MASK << QUP_IO_M_OUTPUT_MODE_SHIFT));

	/*
	 * If it's being done using DMA then the hardware will
	 * need to pack and unpack the byte stream into the word/dword
	 * stream being expected by the SPI/QUP micro engine.
	 *
	 * For PIO modes we're doing the pack/unpack in software,
	 * see the pio/block transfer routines.
	 */
	if (qcom_spi_hw_is_opmode_dma_locked(sc))
		reg |= (QUP_IO_M_PACK_EN | QUP_IO_M_UNPACK_EN);
	else
		reg &= ~(QUP_IO_M_PACK_EN | QUP_IO_M_UNPACK_EN);

	/* Transfer mode */
	reg |= ((sc->state.transfer_mode & QUP_IO_M_INPUT_MODE_MASK)
	    << QUP_IO_M_INPUT_MODE_SHIFT);
	reg |= ((sc->state.transfer_mode & QUP_IO_M_OUTPUT_MODE_MASK)
	    << QUP_IO_M_OUTPUT_MODE_SHIFT);

	QCOM_SPI_DPRINTF(sc, QCOM_SPI_DEBUG_HW_TRANSFER_SETUP,
	    "%s: QUP_IO_M_MODES=0x%08x\n", __func__, reg);

	QCOM_SPI_WRITE_4(sc, QUP_IO_M_MODES, reg);
	QCOM_SPI_BARRIER_WRITE(sc);

	return (0);
}

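/*
 * Set the SPI clock idle polarity (CPOL) in SPI_IO_CONTROL.
 */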
int
qcom_spi_hw_setup_spi_io_clock_polarity(struct qcom_spi_softc *sc,
    bool cpol)
{
	uint32_t reg;

	QCOM_SPI_ASSERT_LOCKED(sc);

	reg = QCOM_SPI_READ_4(sc, SPI_IO_CONTROL);

	if (cpol)
		reg |= SPI_IO_C_CLK_IDLE_HIGH;
	else
		reg &= ~SPI_IO_C_CLK_IDLE_HIGH;

	QCOM_SPI_DPRINTF(sc, QCOM_SPI_DEBUG_HW_TRANSFER_SETUP,
	    "%s: SPI_IO_CONTROL=0x%08x\n", __func__, reg);

	QCOM_SPI_WRITE_4(sc, SPI_IO_CONTROL, reg);
	QCOM_SPI_BARRIER_WRITE(sc);

	return (0);
}

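/*
 * Program SPI_CONFIG: clock/transfer phase (CPHA), loopback
 * disabled, and high speed mode if the SPI clock is fast enough.
 */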
int
qcom_spi_hw_setup_spi_config(struct qcom_spi_softc *sc, uint32_t clock_val,
    bool cpha)
{
	uint32_t reg;

	/*
	 * For now we don't have a way to configure loopback SPI for
	 * testing.  When we do, here's where we would put that.
	 */

	QCOM_SPI_ASSERT_LOCKED(sc);

	reg = QCOM_SPI_READ_4(sc, SPI_CONFIG);
	reg &= ~SPI_CONFIG_LOOPBACK;

	if (cpha)
		reg &= ~SPI_CONFIG_INPUT_FIRST;
	else
		reg |= SPI_CONFIG_INPUT_FIRST;

	/*
	 * If the frequency is above SPI_HS_MIN_RATE then enable high speed.
	 * This apparently improves stability.
	 *
	 * Note - don't do this if SPI loopback is enabled!
	 */
	if (clock_val >= SPI_HS_MIN_RATE)
		reg |= SPI_CONFIG_HS_MODE;
	else
		reg &= ~SPI_CONFIG_HS_MODE;

	QCOM_SPI_DPRINTF(sc, QCOM_SPI_DEBUG_HW_TRANSFER_SETUP,
	    "%s: SPI_CONFIG=0x%08x\n", __func__, reg);

	QCOM_SPI_WRITE_4(sc, SPI_CONFIG, reg);
	QCOM_SPI_BARRIER_WRITE(sc);

	return (0);
}

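/*
 * Program QUP_CONFIG: SPI mode, the number of bits per transfer
 * word and, for DMA, whether we're shifting data in and/or out.
 */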
int
qcom_spi_hw_setup_qup_config(struct qcom_spi_softc *sc, bool is_tx, bool is_rx)
{
	uint32_t reg;

	QCOM_SPI_ASSERT_LOCKED(sc);

	reg = QCOM_SPI_READ_4(sc, QUP_CONFIG);
	reg &= ~(QUP_CONFIG_NO_INPUT | QUP_CONFIG_NO_OUTPUT | QUP_CONFIG_N);

	/* SPI mode */
	reg |= QUP_CONFIG_SPI_MODE;

	/* bitmask for number of bits per word being used in each FIFO slot */
	reg |= ((sc->state.transfer_word_size * 8) - 1) & QUP_CONFIG_N;

	/*
	 * When doing DMA we need to configure whether we are shifting
	 * data in, out, and/or both.  For PIO/block modes it must stay
	 * unset.
	 */
	if (qcom_spi_hw_is_opmode_dma_locked(sc)) {
		if (is_rx == false)
			reg |= QUP_CONFIG_NO_INPUT;
		if (is_tx == false)
			reg |= QUP_CONFIG_NO_OUTPUT;
	}

	QCOM_SPI_DPRINTF(sc, QCOM_SPI_DEBUG_HW_TRANSFER_SETUP,
	    "%s: QUP_CONFIG=0x%08x\n", __func__, reg);

	QCOM_SPI_WRITE_4(sc, QUP_CONFIG, reg);
	QCOM_SPI_BARRIER_WRITE(sc);

	return (0);
}

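/*
 * Program QUP_OPERATIONAL_MASK (QUP v2 and later only); the
 * input/output service interrupts are masked for DMA transfers
 * and left unmasked for PIO.
 */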
int
qcom_spi_hw_setup_operational_mask(struct qcom_spi_softc *sc)
{

	QCOM_SPI_ASSERT_LOCKED(sc);

	if (QCOM_SPI_QUP_VERSION_V1(sc)) {
		QCOM_SPI_DPRINTF(sc, QCOM_SPI_DEBUG_HW_TRANSFER_SETUP,
		    "%s: skipping, qupv1\n", __func__);
		return (0);
	}

	if (qcom_spi_hw_is_opmode_dma_locked(sc))
		QCOM_SPI_WRITE_4(sc, QUP_OPERATIONAL_MASK,
		    QUP_OP_IN_SERVICE_FLAG | QUP_OP_OUT_SERVICE_FLAG);
	else
		QCOM_SPI_WRITE_4(sc, QUP_OPERATIONAL_MASK, 0);

	QCOM_SPI_BARRIER_WRITE(sc);

	return (0);
}

/*
 * ACK that we have already serviced the output FIFO.
 */
int
qcom_spi_hw_ack_write_pio_fifo(struct qcom_spi_softc *sc)
{

	QCOM_SPI_ASSERT_LOCKED(sc);
	QCOM_SPI_WRITE_4(sc, QUP_OPERATIONAL, QUP_OP_OUT_SERVICE_FLAG);
	QCOM_SPI_BARRIER_WRITE(sc);
	return (0);
}

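/*
 * Re-read the operational state and ACK the output service flag.
 */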
int
qcom_spi_hw_ack_opmode(struct qcom_spi_softc *sc)
{

	QCOM_SPI_ASSERT_LOCKED(sc);

	QCOM_SPI_BARRIER_READ(sc);
	QCOM_SPI_READ_4(sc, QUP_OPERATIONAL);
	QCOM_SPI_WRITE_4(sc, QUP_OPERATIONAL, QUP_OP_OUT_SERVICE_FLAG);
	QCOM_SPI_BARRIER_WRITE(sc);

	return (0);
}

/*
 * Read the value from the TX buffer into the given 32 bit DWORD,
 * pre-shifting it into the place requested.
 *
 * Returns true if there was a byte available, false otherwise.
 */
static bool
qcom_spi_hw_write_from_tx_buf(struct qcom_spi_softc *sc, int shift,
    uint32_t *val)
{

	QCOM_SPI_ASSERT_LOCKED(sc);

	if (sc->transfer.tx_buf == NULL)
		return (false);

	if (sc->transfer.tx_offset < sc->transfer.tx_len) {
		*val |= (sc->transfer.tx_buf[sc->transfer.tx_offset] & 0xff)
		    << shift;
		sc->transfer.tx_offset++;
		return (true);
	}

	return (false);
}

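/*
 * Write up to transfer.num_words words from the TX buffer into
 * the output FIFO, packing 1/2/4 byte transfer words per FIFO
 * slot, and ACK the output service interrupt.
 */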
int
qcom_spi_hw_write_pio_fifo(struct qcom_spi_softc *sc)
{
	uint32_t i;
	int num_bytes = 0;

	QCOM_SPI_ASSERT_LOCKED(sc);

	QCOM_SPI_WRITE_4(sc, QUP_OPERATIONAL, QUP_OP_OUT_SERVICE_FLAG);
	QCOM_SPI_BARRIER_WRITE(sc);

	/*
	 * Loop over the transfer num_words; complain if the FIFO
	 * fills up before we've written everything.
	 */
	for (i = 0; i < sc->transfer.num_words; i++) {
		uint32_t reg;

		/* Break if FIFO is full */
		if ((QCOM_SPI_READ_4(sc, QUP_OPERATIONAL)
		    & QUP_OP_OUT_FIFO_FULL) != 0) {
			device_printf(sc->sc_dev, "%s: FIFO full\n", __func__);
			break;
		}

		/*
		 * Handle 1, 2, 4 byte transfer packing rules.
		 *
		 * Unlike read, where the shifting is done towards the MSB
		 * for us by default, we have to do it ourselves for transmit.
		 * There's a bit that one can set to do the preshifting
		 * (and u-boot uses it!) but I'll stick with what Linux is
		 * doing to make it easier for future maintenance.
		 *
		 * The format is the same as 4 byte RX - 0xaabbccdd;
		 * the byte ordering on the wire being aa, bb, cc, dd.
		 */
		reg = 0;
		if (sc->state.transfer_word_size == 1) {
			if (qcom_spi_hw_write_from_tx_buf(sc, 24, &reg))
				num_bytes++;
		} else if (sc->state.transfer_word_size == 2) {
			if (qcom_spi_hw_write_from_tx_buf(sc, 24, &reg))
				num_bytes++;
			if (qcom_spi_hw_write_from_tx_buf(sc, 16, &reg))
				num_bytes++;
		} else if (sc->state.transfer_word_size == 4) {
			if (qcom_spi_hw_write_from_tx_buf(sc, 24, &reg))
				num_bytes++;
			if (qcom_spi_hw_write_from_tx_buf(sc, 16, &reg))
				num_bytes++;
			if (qcom_spi_hw_write_from_tx_buf(sc, 8, &reg))
				num_bytes++;
			if (qcom_spi_hw_write_from_tx_buf(sc, 0, &reg))
				num_bytes++;
		}

		/*
		 * Always shift out something, in case we need phantom
		 * writes to finish things up whilst we read a reply
		 * payload.
		 */
		QCOM_SPI_WRITE_4(sc, QUP_OUTPUT_FIFO, reg);
		QCOM_SPI_BARRIER_WRITE(sc);
	}

	QCOM_SPI_DPRINTF(sc, QCOM_SPI_DEBUG_HW_TX_FIFO,
	    "%s: wrote %d bytes (%d fifo slots)\n",
	    __func__, num_bytes, sc->transfer.num_words);

	return (0);
}

int
qcom_spi_hw_write_pio_block(struct qcom_spi_softc *sc)
{
	/* Not yet implemented */
	return (ENXIO);
}

/*
 * Read data into the RX buffer and increment the RX offset.
 *
 * Return true if the byte was saved into the RX buffer, else
 * return false.
 */
static bool
qcom_spi_hw_read_into_rx_buf(struct qcom_spi_softc *sc, uint8_t val)
{
	QCOM_SPI_ASSERT_LOCKED(sc);

	if (sc->transfer.rx_buf == NULL)
		return (false);

	/* Make sure we aren't overflowing the receive buffer */
	if (sc->transfer.rx_offset < sc->transfer.rx_len) {
		sc->transfer.rx_buf[sc->transfer.rx_offset] = val;
		sc->transfer.rx_offset++;
		return (true);
	}
	return (false);
}

/*
 * Read "n_words" transfers, and push those bytes into the receive buffer.
 * Make sure we have enough space, and make sure we don't overflow the
 * read buffer size too!
 */
int
qcom_spi_hw_read_pio_fifo(struct qcom_spi_softc *sc)
{
	uint32_t i;
	uint32_t reg;
	int num_bytes = 0;

	QCOM_SPI_ASSERT_LOCKED(sc);

	QCOM_SPI_WRITE_4(sc, QUP_OPERATIONAL, QUP_OP_IN_SERVICE_FLAG);
	QCOM_SPI_BARRIER_WRITE(sc);

	for (i = 0; i < sc->transfer.num_words; i++) {
		/* Break if FIFO is empty */
		QCOM_SPI_BARRIER_READ(sc);
		reg = QCOM_SPI_READ_4(sc, QUP_OPERATIONAL);
		if ((reg & QUP_OP_IN_FIFO_NOT_EMPTY) == 0) {
			device_printf(sc->sc_dev, "%s: FIFO empty\n", __func__);
			break;
		}

		/*
		 * Always read up to num_words while the FIFO is non-empty;
		 * that way if we have mis-matching TX/RX buffer sizes for
		 * some reason we will still read the needed phantom bytes.
		 */
		reg = QCOM_SPI_READ_4(sc, QUP_INPUT_FIFO);

		/*
		 * Unpack the receive buffer based on whether we are
		 * doing 1, 2, or 4 byte transfer words.
		 */
		if (sc->state.transfer_word_size == 1) {
			if (qcom_spi_hw_read_into_rx_buf(sc, reg & 0xff))
				num_bytes++;
		} else if (sc->state.transfer_word_size == 2) {
			if (qcom_spi_hw_read_into_rx_buf(sc, (reg >> 8) & 0xff))
				num_bytes++;
			if (qcom_spi_hw_read_into_rx_buf(sc, reg & 0xff))
				num_bytes++;
		} else if (sc->state.transfer_word_size == 4) {
			if (qcom_spi_hw_read_into_rx_buf(sc, (reg >> 24) & 0xff))
				num_bytes++;
			if (qcom_spi_hw_read_into_rx_buf(sc, (reg >> 16) & 0xff))
				num_bytes++;
			if (qcom_spi_hw_read_into_rx_buf(sc, (reg >> 8) & 0xff))
				num_bytes++;
			if (qcom_spi_hw_read_into_rx_buf(sc, reg & 0xff))
				num_bytes++;
		}
	}

	QCOM_SPI_DPRINTF(sc, QCOM_SPI_DEBUG_HW_TX_FIFO,
	    "%s: read %d bytes (%d transfer words)\n",
	    __func__, num_bytes, sc->transfer.num_words);

#if 0
	/*
	 * This is a no-op for FIFO mode, it's only a thing for BLOCK
	 * transfers.
	 */
	QCOM_SPI_BARRIER_READ(sc);
	reg = QCOM_SPI_READ_4(sc, QUP_OPERATIONAL);
	if (reg & QUP_OP_MAX_INPUT_DONE_FLAG) {
		device_printf(sc->sc_dev, "%s: read complete (DONE)\n",
		    __func__);
		sc->intr.done = true;
	}
#endif

#if 0
	/*
	 * And see if we've finished the transfer and won't be getting
	 * any more.  Then treat it as done as well.
	 *
	 * In FIFO only mode we don't get a completion interrupt;
	 * we get an interrupt when the FIFO has enough data present.
	 */
	if ((sc->state.transfer_mode == QUP_IO_M_MODE_FIFO)
	    && (sc->transfer.rx_offset >= sc->transfer.rx_len)) {
		device_printf(sc->sc_dev, "%s: read complete (rxlen)\n",
		    __func__);
		sc->intr.done = true;
	}
#endif

	/*
	 * For FIFO transfers we get a /single/ result that completes
	 * the FIFO transfer.  We won't get any subsequent transfers;
	 * we'll need to schedule a new FIFO transfer.
	 */
	sc->intr.done = true;

	return (0);
}

963 
964 int
965 qcom_spi_hw_read_pio_block(struct qcom_spi_softc *sc)
966 {
967 
968 	/* Not yet implemented */
969 	return (ENXIO);
970 }
971 
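/*
 * Do a full hardware (QUP_SW_RESET) reset of the block, then
 * give it a little time to settle.
 */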
int
qcom_spi_hw_do_full_reset(struct qcom_spi_softc *sc)
{
	QCOM_SPI_ASSERT_LOCKED(sc);

	QCOM_SPI_WRITE_4(sc, QUP_SW_RESET, 1);
	QCOM_SPI_BARRIER_WRITE(sc);
	DELAY(100);

	return (0);
}