/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2021, Adrian Chadd <adrian@FreeBSD.org>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/param.h>
#include <sys/systm.h>

#include <sys/bus.h>
#include <sys/interrupt.h>
#include <sys/malloc.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/kernel.h>
#include <sys/module.h>
#include <sys/rman.h>

#include <vm/vm.h>
#include <vm/pmap.h>
#include <vm/vm_extern.h>

#include <machine/bus.h>
#include <machine/cpu.h>

#include <dev/gpio/gpiobusvar.h>
#include <dev/ofw/ofw_bus.h>
#include <dev/ofw/ofw_bus_subr.h>

#include <dev/clk/clk.h>
#include <dev/hwreset/hwreset.h>

#include <dev/spibus/spi.h>
#include <dev/spibus/spibusvar.h>
#include "spibus_if.h"

#include <dev/qcom_qup/qcom_spi_var.h>
#include <dev/qcom_qup/qcom_spi_reg.h>
#include <dev/qcom_qup/qcom_qup_reg.h>
#include <dev/qcom_qup/qcom_spi_debug.h>

int
qcom_spi_hw_read_controller_transfer_sizes(struct qcom_spi_softc *sc)
{
	uint32_t reg, val;

	reg = QCOM_SPI_READ_4(sc, QUP_IO_M_MODES);

	QCOM_SPI_DPRINTF(sc, QCOM_SPI_DEBUG_HW_TRANSFER_SETUP,
	    "%s: QUP_IO_M_MODES=0x%08x\n", __func__, reg);

	/* Input block size */
	val = (reg >> QUP_IO_M_INPUT_BLOCK_SIZE_SHIFT)
	    & QUP_IO_M_INPUT_BLOCK_SIZE_MASK;
	if (val == 0)
		sc->config.input_block_size = 4;
	else
		sc->config.input_block_size = val * 16;

	/* Output block size */
	val = (reg >> QUP_IO_M_OUTPUT_BLOCK_SIZE_SHIFT)
	    & QUP_IO_M_OUTPUT_BLOCK_SIZE_MASK;
	if (val == 0)
		sc->config.output_block_size = 4;
	else
		sc->config.output_block_size = val * 16;

	/* Input FIFO size */
	val = (reg >> QUP_IO_M_INPUT_FIFO_SIZE_SHIFT)
	    & QUP_IO_M_INPUT_FIFO_SIZE_MASK;
	sc->config.input_fifo_size =
	    sc->config.input_block_size * (2 << val);

	/* Output FIFO size */
	val = (reg >> QUP_IO_M_OUTPUT_FIFO_SIZE_SHIFT)
	    & QUP_IO_M_OUTPUT_FIFO_SIZE_MASK;
	sc->config.output_fifo_size =
	    sc->config.output_block_size * (2 << val);
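	/*
	 * Worked example (illustrative, from the arithmetic above): a
	 * block size field of 1 gives a 16 byte block, and a FIFO size
	 * field of 1 then gives 16 * (2 << 1) = 64 bytes of FIFO; a
	 * block size field of 0 falls back to the 4 byte minimum.
	 */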

	return (0);
}

static bool
qcom_spi_hw_qup_is_state_valid_locked(struct qcom_spi_softc *sc)
{
	uint32_t reg;

	QCOM_SPI_ASSERT_LOCKED(sc);

	reg = QCOM_SPI_READ_4(sc, QUP_STATE);
	QCOM_SPI_BARRIER_READ(sc);

	return !! (reg & QUP_STATE_VALID);
}

static int
qcom_spi_hw_qup_wait_state_valid_locked(struct qcom_spi_softc *sc)
{
	int i;

	for (i = 0; i < 10; i++) {
		if (qcom_spi_hw_qup_is_state_valid_locked(sc))
			break;
	}
	if (i >= 10) {
		device_printf(sc->sc_dev,
		    "ERROR: timeout waiting for valid state\n");
		return (ENXIO);
	}
	return (0);
}

static bool
qcom_spi_hw_is_opmode_dma_locked(struct qcom_spi_softc *sc)
{

	QCOM_SPI_ASSERT_LOCKED(sc);

	if (sc->state.transfer_mode == QUP_IO_M_MODE_DMOV)
		return (true);
	if (sc->state.transfer_mode == QUP_IO_M_MODE_BAM)
		return (true);
	return (false);
}

int
qcom_spi_hw_qup_set_state_locked(struct qcom_spi_softc *sc, uint32_t state)
{
	uint32_t cur_state;
	int ret;

	QCOM_SPI_ASSERT_LOCKED(sc);

	/* Wait until the state becomes valid */
	ret = qcom_spi_hw_qup_wait_state_valid_locked(sc);
	if (ret != 0) {
		return (ret);
	}

	cur_state = QCOM_SPI_READ_4(sc, QUP_STATE);

	QCOM_SPI_DPRINTF(sc, QCOM_SPI_DEBUG_HW_STATE_CHANGE,
	    "%s: target state=%d, cur_state=0x%08x\n",
	    __func__, state, cur_state);

	/*
	 * According to the QUP specification, when going
	 * from PAUSE to RESET, two writes are required.
	 */
	if ((state == QUP_STATE_RESET)
	    && ((cur_state & QUP_STATE_MASK) == QUP_STATE_PAUSE)) {
		QCOM_SPI_WRITE_4(sc, QUP_STATE, QUP_STATE_CLEAR);
		QCOM_SPI_BARRIER_WRITE(sc);
		QCOM_SPI_WRITE_4(sc, QUP_STATE, QUP_STATE_CLEAR);
		QCOM_SPI_BARRIER_WRITE(sc);
	} else {
		cur_state &= ~QUP_STATE_MASK;
		cur_state |= state;
		QCOM_SPI_WRITE_4(sc, QUP_STATE, cur_state);
		QCOM_SPI_BARRIER_WRITE(sc);
	}

	/* Wait until the state becomes valid */
	ret = qcom_spi_hw_qup_wait_state_valid_locked(sc);
	if (ret != 0) {
		return (ret);
	}

	cur_state = QCOM_SPI_READ_4(sc, QUP_STATE);

	QCOM_SPI_DPRINTF(sc, QCOM_SPI_DEBUG_HW_STATE_CHANGE,
	    "%s: FINISH: target state=%d, cur_state=0x%08x\n",
	    __func__, state, cur_state);

	return (0);
}

/*
 * Do initial QUP setup.
 *
 * This is initially for the SPI driver; it would be interesting to see how
 * much of this is the same with the I2C/HSUART paths.
 */
int
qcom_spi_hw_qup_init_locked(struct qcom_spi_softc *sc)
{
	int ret;

	QCOM_SPI_ASSERT_LOCKED(sc);

	/* Full hardware reset */
	(void) qcom_spi_hw_do_full_reset(sc);

	ret = qcom_spi_hw_qup_set_state_locked(sc, QUP_STATE_RESET);
	if (ret != 0) {
		device_printf(sc->sc_dev, "ERROR: %s: couldn't reset\n",
		    __func__);
		goto error;
	}

	QCOM_SPI_WRITE_4(sc, QUP_OPERATIONAL, 0);
	QCOM_SPI_WRITE_4(sc, QUP_IO_M_MODES, 0);
	/* Note: no QUP_OPERATIONAL_MASK in QUP v1 */
	if (! QCOM_SPI_QUP_VERSION_V1(sc))
		QCOM_SPI_WRITE_4(sc, QUP_OPERATIONAL_MASK, 0);

	/* Explicitly disable input overrun in QUP v1 */
	if (QCOM_SPI_QUP_VERSION_V1(sc))
		QCOM_SPI_WRITE_4(sc, QUP_ERROR_FLAGS_EN,
		    QUP_ERROR_OUTPUT_OVER_RUN
		    | QUP_ERROR_INPUT_UNDER_RUN
		    | QUP_ERROR_OUTPUT_UNDER_RUN);
	QCOM_SPI_BARRIER_WRITE(sc);

	return (0);
error:
	return (ret);
}

/*
 * Do initial SPI setup.
 */
int
qcom_spi_hw_spi_init_locked(struct qcom_spi_softc *sc)
{

	QCOM_SPI_ASSERT_LOCKED(sc);

	/* Initial SPI error flags */
	QCOM_SPI_WRITE_4(sc, SPI_ERROR_FLAGS_EN,
	    QUP_ERROR_INPUT_UNDER_RUN
	    | QUP_ERROR_OUTPUT_UNDER_RUN);
	QCOM_SPI_BARRIER_WRITE(sc);

	/* Initial SPI config */
	QCOM_SPI_WRITE_4(sc, SPI_CONFIG, 0);
	QCOM_SPI_BARRIER_WRITE(sc);

	/* Initial CS/tri-state io control config */
	QCOM_SPI_WRITE_4(sc, SPI_IO_CONTROL,
	    SPI_IO_C_NO_TRI_STATE
	    | SPI_IO_C_CS_SELECT(sc->config.cs_select));
	QCOM_SPI_BARRIER_WRITE(sc);

	return (0);
}

/*
 * Force the currently selected device CS line to be active
 * or inactive.
 *
 * This forces it to be active or inactive rather than letting
 * the SPI transfer machine do its thing.  If you want to be able
 * to break up a big transaction into a handful of smaller ones,
 * without toggling /CS_n for that device, then you need it forced.
 * (If you toggle the /CS_n to the device to inactive then active,
 * NOR/NAND devices tend to stop a block transfer.)
 */
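/*
 * Illustrative call sequence (hypothetical caller, not taken from this
 * file): force /CS_n active, run several qcom_spi_hw_write_pio_fifo() /
 * qcom_spi_hw_read_pio_fifo() passes for one logical command, then
 * force /CS_n inactive again.
 */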
int
qcom_spi_hw_spi_cs_force(struct qcom_spi_softc *sc, int cs, bool enable)
{
	uint32_t reg;

	QCOM_SPI_ASSERT_LOCKED(sc);

	QCOM_SPI_DPRINTF(sc, QCOM_SPI_DEBUG_HW_CHIPSELECT,
	    "%s: called, enable=%u\n",
	    __func__, enable);

	reg = QCOM_SPI_READ_4(sc, SPI_IO_CONTROL);
	if (enable)
		reg |= SPI_IO_C_FORCE_CS;
	else
		reg &= ~SPI_IO_C_FORCE_CS;
	reg &= ~SPI_IO_C_CS_SELECT_MASK;
	reg |= SPI_IO_C_CS_SELECT(cs);
	QCOM_SPI_WRITE_4(sc, SPI_IO_CONTROL, reg);
	QCOM_SPI_BARRIER_WRITE(sc);

	return (0);
}

/*
 * ACK/store current interrupt flag state.
 */
int
qcom_spi_hw_interrupt_handle(struct qcom_spi_softc *sc)
{
	uint32_t qup_error, spi_error, op_flags;

	QCOM_SPI_ASSERT_LOCKED(sc);

	/* Get QUP/SPI state */
	qup_error = QCOM_SPI_READ_4(sc, QUP_ERROR_FLAGS);
	spi_error = QCOM_SPI_READ_4(sc, SPI_ERROR_FLAGS);
	op_flags = QCOM_SPI_READ_4(sc, QUP_OPERATIONAL);

	/* ACK state */
	QCOM_SPI_WRITE_4(sc, QUP_ERROR_FLAGS, qup_error);
	QCOM_SPI_WRITE_4(sc, SPI_ERROR_FLAGS, spi_error);

	QCOM_SPI_DPRINTF(sc, QCOM_SPI_DEBUG_HW_INTR,
	    "%s: called; qup=0x%08x, spi=0x%08x, op=0x%08x\n",
	    __func__,
	    qup_error,
	    spi_error,
	    op_flags);

	/* handle error flags */
	if (qup_error != 0) {
		device_printf(sc->sc_dev, "ERROR: (QUP) mask=0x%08x\n",
		    qup_error);
		sc->intr.error = true;
	}
	if (spi_error != 0) {
		device_printf(sc->sc_dev, "ERROR: (SPI) mask=0x%08x\n",
		    spi_error);
		sc->intr.error = true;
	}

	/* handle operational state */
	if (qcom_spi_hw_is_opmode_dma_locked(sc)) {
		/* ACK interrupts now */
		QCOM_SPI_WRITE_4(sc, QUP_OPERATIONAL, op_flags);
		if ((op_flags & QUP_OP_IN_SERVICE_FLAG)
		    && (op_flags & QUP_OP_MAX_INPUT_DONE_FLAG))
			sc->intr.rx_dma_done = true;
		if ((op_flags & QUP_OP_OUT_SERVICE_FLAG)
		    && (op_flags & QUP_OP_MAX_OUTPUT_DONE_FLAG))
			sc->intr.tx_dma_done = true;
	} else {
		/* FIFO/Block */
		if (op_flags & QUP_OP_IN_SERVICE_FLAG)
			sc->intr.do_rx = true;
		if (op_flags & QUP_OP_OUT_SERVICE_FLAG)
			sc->intr.do_tx = true;
	}

	/* Check if we've finished transfers */
	if (op_flags & QUP_OP_MAX_INPUT_DONE_FLAG)
		sc->intr.done = true;
	if (sc->intr.error)
		sc->intr.done = true;

	return (0);
}

/*
 * Make initial transfer selections based on the transfer sizes
 * and alignment.
 *
 * For now this'll just default to FIFO until that works, and then
 * will grow to include BLOCK / DMA as appropriate.
 */
int
qcom_spi_hw_setup_transfer_selection(struct qcom_spi_softc *sc, uint32_t len)
{

	QCOM_SPI_ASSERT_LOCKED(sc);

	/*
	 * For now only support doing a single FIFO transfer.
	 * The main PIO transfer routine loop will break it up for us.
	 */
	sc->state.transfer_mode = QUP_IO_M_MODE_FIFO;
	sc->transfer.tx_offset = 0;
	sc->transfer.rx_offset = 0;
	sc->transfer.tx_len = 0;
	sc->transfer.rx_len = 0;
	sc->transfer.tx_buf = NULL;
	sc->transfer.rx_buf = NULL;

	/*
	 * If we're sending a DWORD multiple sized block (like IO buffers)
	 * then we can totally just use the DWORD size transfers.
	 *
	 * This is really only valid for PIO/block modes; I'm not yet
	 * sure what we should do for DMA modes.
	 */
	if (len > 0 && len % 4 == 0)
		sc->state.transfer_word_size = 4;
	else
		sc->state.transfer_word_size = 1;
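	/*
	 * Example (illustrative): a 256 byte page-style payload is moved
	 * as 64 four-byte words, while a 5 byte command/address sequence
	 * falls back to one byte per word.
	 */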

	return (0);
}

/*
 * Blank the transfer state after a full transfer is completed.
 */
int
qcom_spi_hw_complete_transfer(struct qcom_spi_softc *sc)
{
	QCOM_SPI_ASSERT_LOCKED(sc);

	sc->state.transfer_mode = QUP_IO_M_MODE_FIFO;
	sc->transfer.tx_offset = 0;
	sc->transfer.rx_offset = 0;
	sc->transfer.tx_len = 0;
	sc->transfer.rx_len = 0;
	sc->transfer.tx_buf = NULL;
	sc->transfer.rx_buf = NULL;
	sc->state.transfer_word_size = 0;
	return (0);
}

/*
 * Configure up the transfer selection for the current transfer.
 *
 * This calculates how many words we can transfer in the current
 * transfer and what's left to transfer.
 */
int
qcom_spi_hw_setup_current_transfer(struct qcom_spi_softc *sc)
{
	uint32_t bytes_left;

	QCOM_SPI_ASSERT_LOCKED(sc);

	/*
	 * XXX For now, base this on the TX side buffer size, not both.
	 * Later on we'll want to configure it based on the MAX of
	 * either and just eat up the dummy values in the PIO
	 * routines.  (For DMA it's .. more annoyingly complicated
	 * if the transfer sizes are not symmetrical.)
	 */
	bytes_left = sc->transfer.tx_len - sc->transfer.tx_offset;

	if (sc->state.transfer_mode == QUP_IO_M_MODE_FIFO) {
		/*
		 * For FIFO transfers the num_words limit depends upon
		 * the word size, FIFO size and how many bytes are left.
		 * It definitely will be under SPI_MAX_XFER so don't
		 * worry about that here.
		 */
		sc->transfer.num_words = bytes_left / sc->state.transfer_word_size;
		sc->transfer.num_words = MIN(sc->transfer.num_words,
		    sc->config.input_fifo_size / sizeof(uint32_t));
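		/*
		 * Illustrative example (assuming a 64 byte input FIFO):
		 * with 4 byte words at most 16 words are queued in this
		 * pass; any remainder is picked up by later passes.
		 */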
	} else if (sc->state.transfer_mode == QUP_IO_M_MODE_BLOCK) {
		/*
		 * For BLOCK transfers the logic will be a little different.
		 * Instead of it being based on the maximum input_fifo_size,
		 * it'll be broken down into the "words per block" size but
		 * our maximum transfer size will ACTUALLY be capped by
		 * SPI_MAX_XFER (65536-64 bytes.)  Each transfer
		 * will end up being in multiples of a block until the
		 * last transfer.
		 */
		sc->transfer.num_words = bytes_left / sc->state.transfer_word_size;
		sc->transfer.num_words = MIN(sc->transfer.num_words,
		    SPI_MAX_XFER);
	}

	QCOM_SPI_DPRINTF(sc, QCOM_SPI_DEBUG_HW_TRANSFER_SETUP,
	    "%s: transfer.tx_len=%u,"
	    " transfer.tx_offset=%u,"
	    " transfer_word_size=%u,"
	    " bytes_left=%u, num_words=%u, fifo_word_max=%u\n",
	    __func__,
	    sc->transfer.tx_len,
	    sc->transfer.tx_offset,
	    sc->state.transfer_word_size,
	    bytes_left,
	    sc->transfer.num_words,
	    sc->config.input_fifo_size / sizeof(uint32_t));

	return (0);
}

/*
 * Setup the PIO FIFO transfer count.
 *
 * Note that we get a /single/ TX/RX phase up to these num_words
 * transfers.
 */
int
qcom_spi_hw_setup_pio_transfer_cnt(struct qcom_spi_softc *sc)
{

	QCOM_SPI_ASSERT_LOCKED(sc);

	QCOM_SPI_WRITE_4(sc, QUP_MX_READ_CNT, sc->transfer.num_words);
	QCOM_SPI_WRITE_4(sc, QUP_MX_WRITE_CNT, sc->transfer.num_words);
	QCOM_SPI_WRITE_4(sc, QUP_MX_INPUT_CNT, 0);
	QCOM_SPI_WRITE_4(sc, QUP_MX_OUTPUT_CNT, 0);

	QCOM_SPI_DPRINTF(sc, QCOM_SPI_DEBUG_HW_TRANSFER_SETUP,
	    "%s: num_words=%u\n", __func__,
	    sc->transfer.num_words);

	QCOM_SPI_BARRIER_WRITE(sc);

	return (0);
}

/*
 * Setup the PIO BLOCK transfer count.
 *
 * This sets up the total transfer size, in TX/RX FIFO block size
 * chunks.  We will get multiple notifications when a block sized
 * chunk of data is available or required.
 */
int
qcom_spi_hw_setup_block_transfer_cnt(struct qcom_spi_softc *sc)
{

	QCOM_SPI_ASSERT_LOCKED(sc);

	QCOM_SPI_WRITE_4(sc, QUP_MX_READ_CNT, 0);
	QCOM_SPI_WRITE_4(sc, QUP_MX_WRITE_CNT, 0);
	QCOM_SPI_WRITE_4(sc, QUP_MX_INPUT_CNT, sc->transfer.num_words);
	QCOM_SPI_WRITE_4(sc, QUP_MX_OUTPUT_CNT, sc->transfer.num_words);
	QCOM_SPI_BARRIER_WRITE(sc);

	return (0);
}

int
qcom_spi_hw_setup_io_modes(struct qcom_spi_softc *sc)
{
	uint32_t reg;

	QCOM_SPI_ASSERT_LOCKED(sc);

	reg = QCOM_SPI_READ_4(sc, QUP_IO_M_MODES);

	reg &= ~((QUP_IO_M_INPUT_MODE_MASK << QUP_IO_M_INPUT_MODE_SHIFT)
	    | (QUP_IO_M_OUTPUT_MODE_MASK << QUP_IO_M_OUTPUT_MODE_SHIFT));

	/*
	 * If it's being done using DMA then the hardware will
	 * need to pack and unpack the byte stream into the word/dword
	 * stream being expected by the SPI/QUP micro engine.
	 *
	 * For PIO modes we're doing the pack/unpack in software,
	 * see the pio/block transfer routines.
	 */
	if (qcom_spi_hw_is_opmode_dma_locked(sc))
		reg |= (QUP_IO_M_PACK_EN | QUP_IO_M_UNPACK_EN);
	else
		reg &= ~(QUP_IO_M_PACK_EN | QUP_IO_M_UNPACK_EN);

	/* Transfer mode */
	reg |= ((sc->state.transfer_mode & QUP_IO_M_INPUT_MODE_MASK)
	    << QUP_IO_M_INPUT_MODE_SHIFT);
	reg |= ((sc->state.transfer_mode & QUP_IO_M_OUTPUT_MODE_MASK)
	    << QUP_IO_M_OUTPUT_MODE_SHIFT);

	QCOM_SPI_DPRINTF(sc, QCOM_SPI_DEBUG_HW_TRANSFER_SETUP,
	    "%s: QUP_IO_M_MODES=0x%08x\n", __func__, reg);

	QCOM_SPI_WRITE_4(sc, QUP_IO_M_MODES, reg);
	QCOM_SPI_BARRIER_WRITE(sc);

	return (0);
}

int
qcom_spi_hw_setup_spi_io_clock_polarity(struct qcom_spi_softc *sc,
    bool cpol)
{
	uint32_t reg;

	QCOM_SPI_ASSERT_LOCKED(sc);

	reg = QCOM_SPI_READ_4(sc, SPI_IO_CONTROL);

	if (cpol)
		reg |= SPI_IO_C_CLK_IDLE_HIGH;
	else
		reg &= ~SPI_IO_C_CLK_IDLE_HIGH;

	QCOM_SPI_DPRINTF(sc, QCOM_SPI_DEBUG_HW_TRANSFER_SETUP,
	    "%s: SPI_IO_CONTROL=0x%08x\n", __func__, reg);

	QCOM_SPI_WRITE_4(sc, SPI_IO_CONTROL, reg);
	QCOM_SPI_BARRIER_WRITE(sc);

	return (0);
}

int
qcom_spi_hw_setup_spi_config(struct qcom_spi_softc *sc, uint32_t clock_val,
    bool cpha)
{
	uint32_t reg;

	/*
	 * For now we don't have a way to configure loopback SPI for testing,
	 * or the clock/transfer phase.  When we do then here's where we
	 * would put that.
	 */

	QCOM_SPI_ASSERT_LOCKED(sc);

	reg = QCOM_SPI_READ_4(sc, SPI_CONFIG);
	reg &= ~SPI_CONFIG_LOOPBACK;

	if (cpha)
		reg &= ~SPI_CONFIG_INPUT_FIRST;
	else
		reg |= SPI_CONFIG_INPUT_FIRST;

	/*
	 * If the frequency is above SPI_HS_MIN_RATE then enable high speed.
	 * This apparently improves stability.
	 *
	 * Note - don't do this if SPI loopback is enabled!
	 */
	if (clock_val >= SPI_HS_MIN_RATE)
		reg |= SPI_CONFIG_HS_MODE;
	else
		reg &= ~SPI_CONFIG_HS_MODE;

	QCOM_SPI_DPRINTF(sc, QCOM_SPI_DEBUG_HW_TRANSFER_SETUP,
	    "%s: SPI_CONFIG=0x%08x\n", __func__, reg);

	QCOM_SPI_WRITE_4(sc, SPI_CONFIG, reg);
	QCOM_SPI_BARRIER_WRITE(sc);

	return (0);
}

int
qcom_spi_hw_setup_qup_config(struct qcom_spi_softc *sc, bool is_tx, bool is_rx)
{
	uint32_t reg;

	QCOM_SPI_ASSERT_LOCKED(sc);

	reg = QCOM_SPI_READ_4(sc, QUP_CONFIG);
	reg &= ~(QUP_CONFIG_NO_INPUT | QUP_CONFIG_NO_OUTPUT | QUP_CONFIG_N);

	/* SPI mode */
	reg |= QUP_CONFIG_SPI_MODE;

	/* bitmask for number of bits per word being used in each FIFO slot */
	reg |= ((sc->state.transfer_word_size * 8) - 1) & QUP_CONFIG_N;
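	/* e.g. (illustrative) 4 byte words give N = (4 * 8) - 1 = 31. */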

	/*
	 * When doing DMA we need to configure whether we are shifting
	 * data in, out, and/or both.  For PIO/block modes it must stay
	 * unset.
	 */
	if (qcom_spi_hw_is_opmode_dma_locked(sc)) {
		if (is_rx == false)
			reg |= QUP_CONFIG_NO_INPUT;
		if (is_tx == false)
			reg |= QUP_CONFIG_NO_OUTPUT;
	}

	QCOM_SPI_DPRINTF(sc, QCOM_SPI_DEBUG_HW_TRANSFER_SETUP,
	    "%s: QUP_CONFIG=0x%08x\n", __func__, reg);

	QCOM_SPI_WRITE_4(sc, QUP_CONFIG, reg);
	QCOM_SPI_BARRIER_WRITE(sc);

	return (0);
}

int
qcom_spi_hw_setup_operational_mask(struct qcom_spi_softc *sc)
{

	QCOM_SPI_ASSERT_LOCKED(sc);

	if (QCOM_SPI_QUP_VERSION_V1(sc)) {
		QCOM_SPI_DPRINTF(sc, QCOM_SPI_DEBUG_HW_TRANSFER_SETUP,
		    "%s: skipping, qupv1\n", __func__);
		return (0);
	}

	if (qcom_spi_hw_is_opmode_dma_locked(sc))
		QCOM_SPI_WRITE_4(sc, QUP_OPERATIONAL_MASK,
		    QUP_OP_IN_SERVICE_FLAG | QUP_OP_OUT_SERVICE_FLAG);
	else
		QCOM_SPI_WRITE_4(sc, QUP_OPERATIONAL_MASK, 0);

	QCOM_SPI_BARRIER_WRITE(sc);

	return (0);
}

/*
 * ACK that we already have serviced the output FIFO.
 */
int
qcom_spi_hw_ack_write_pio_fifo(struct qcom_spi_softc *sc)
{

	QCOM_SPI_ASSERT_LOCKED(sc);
	QCOM_SPI_WRITE_4(sc, QUP_OPERATIONAL, QUP_OP_OUT_SERVICE_FLAG);
	QCOM_SPI_BARRIER_WRITE(sc);
	return (0);
}

int
qcom_spi_hw_ack_opmode(struct qcom_spi_softc *sc)
{

	QCOM_SPI_ASSERT_LOCKED(sc);

	QCOM_SPI_BARRIER_READ(sc);
	QCOM_SPI_READ_4(sc, QUP_OPERATIONAL);
	QCOM_SPI_WRITE_4(sc, QUP_OPERATIONAL, QUP_OP_OUT_SERVICE_FLAG);
	QCOM_SPI_BARRIER_WRITE(sc);

	return (0);
}

/*
 * Read the value from the TX buffer into the given 32 bit DWORD,
 * pre-shifting it into the place requested.
 *
 * Returns true if there was a byte available, false otherwise.
 */
static bool
qcom_spi_hw_write_from_tx_buf(struct qcom_spi_softc *sc, int shift,
    uint32_t *val)
{

	QCOM_SPI_ASSERT_LOCKED(sc);

	if (sc->transfer.tx_buf == NULL)
		return false;

	if (sc->transfer.tx_offset < sc->transfer.tx_len) {
		*val |= (sc->transfer.tx_buf[sc->transfer.tx_offset] & 0xff)
		    << shift;
		sc->transfer.tx_offset++;
		return true;
	}

	return false;
}

int
qcom_spi_hw_write_pio_fifo(struct qcom_spi_softc *sc)
{
	uint32_t i;
	int num_bytes = 0;

	QCOM_SPI_ASSERT_LOCKED(sc);

	QCOM_SPI_WRITE_4(sc, QUP_OPERATIONAL, QUP_OP_OUT_SERVICE_FLAG);
	QCOM_SPI_BARRIER_WRITE(sc);

	/*
	 * Loop over the transfer num_words; complain if the FIFO fills up.
	 */
	for (i = 0; i < sc->transfer.num_words; i++) {
		uint32_t reg;

		/* Break if FIFO is full */
		if ((QCOM_SPI_READ_4(sc, QUP_OPERATIONAL)
		    & QUP_OP_OUT_FIFO_FULL) != 0) {
			device_printf(sc->sc_dev, "%s: FIFO full\n", __func__);
			break;
		}

		/*
		 * Handle 1, 2, 4 byte transfer packing rules.
		 *
		 * Unlike read, where the shifting is done towards the MSB
		 * for us by default, we have to do it ourselves for transmit.
		 * There's a bit that one can set to do the preshifting
		 * (and u-boot uses it!) but I'll stick with what Linux is
		 * doing to make it easier for future maintenance.
		 *
		 * The format is the same as 4 byte RX - 0xaabbccdd;
		 * the byte ordering on the wire being aa, bb, cc, dd.
		 */
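		/*
		 * Worked example (illustrative): with 4 byte words the
		 * bytes tx[0]..tx[3] land at shifts 24, 16, 8 and 0, so
		 * { 0xaa, 0xbb, 0xcc, 0xdd } packs into 0xaabbccdd.
		 */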
		reg = 0;
		if (sc->state.transfer_word_size == 1) {
			if (qcom_spi_hw_write_from_tx_buf(sc, 24, &reg))
				num_bytes++;
		} else if (sc->state.transfer_word_size == 2) {
			if (qcom_spi_hw_write_from_tx_buf(sc, 24, &reg))
				num_bytes++;
			if (qcom_spi_hw_write_from_tx_buf(sc, 16, &reg))
				num_bytes++;
		} else if (sc->state.transfer_word_size == 4) {
			if (qcom_spi_hw_write_from_tx_buf(sc, 24, &reg))
				num_bytes++;
			if (qcom_spi_hw_write_from_tx_buf(sc, 16, &reg))
				num_bytes++;
			if (qcom_spi_hw_write_from_tx_buf(sc, 8, &reg))
				num_bytes++;
			if (qcom_spi_hw_write_from_tx_buf(sc, 0, &reg))
				num_bytes++;
		}

		/*
		 * always shift out something in case we need phantom
		 * writes to finish things up whilst we read a reply
		 * payload.
		 */
		QCOM_SPI_WRITE_4(sc, QUP_OUTPUT_FIFO, reg);
		QCOM_SPI_BARRIER_WRITE(sc);
	}

	QCOM_SPI_DPRINTF(sc, QCOM_SPI_DEBUG_HW_TX_FIFO,
	    "%s: wrote %d bytes (%d fifo slots)\n",
	    __func__, num_bytes, sc->transfer.num_words);

	return (0);
}

int
qcom_spi_hw_write_pio_block(struct qcom_spi_softc *sc)
{
	/* Not yet implemented */
	return (ENXIO);
}

/*
 * Read data into the RX buffer and increment the RX offset.
 *
 * Return true if the byte was saved into the RX buffer, else
 * return false.
 */
static bool
qcom_spi_hw_read_into_rx_buf(struct qcom_spi_softc *sc, uint8_t val)
{
	QCOM_SPI_ASSERT_LOCKED(sc);

	if (sc->transfer.rx_buf == NULL)
		return false;

	/* Make sure we aren't overflowing the receive buffer */
	if (sc->transfer.rx_offset < sc->transfer.rx_len) {
		sc->transfer.rx_buf[sc->transfer.rx_offset] = val;
		sc->transfer.rx_offset++;
		return true;
	}
	return false;
}

/*
 * Read "n_words" transfers, and push those bytes into the receive buffer.
 * Make sure we have enough space, and make sure we don't overflow the
 * read buffer size too!
 */
int
qcom_spi_hw_read_pio_fifo(struct qcom_spi_softc *sc)
{
	uint32_t i;
	uint32_t reg;
	int num_bytes = 0;

	QCOM_SPI_ASSERT_LOCKED(sc);

	QCOM_SPI_WRITE_4(sc, QUP_OPERATIONAL, QUP_OP_IN_SERVICE_FLAG);
	QCOM_SPI_BARRIER_WRITE(sc);

	for (i = 0; i < sc->transfer.num_words; i++) {
		/* Break if FIFO is empty */
		QCOM_SPI_BARRIER_READ(sc);
		reg = QCOM_SPI_READ_4(sc, QUP_OPERATIONAL);
		if ((reg & QUP_OP_IN_FIFO_NOT_EMPTY) == 0) {
			device_printf(sc->sc_dev, "%s: FIFO empty\n", __func__);
			break;
		}

		/*
		 * Always read up to num_words while the FIFO is non-empty;
		 * that way if we have mismatching TX/RX buffer sizes for
		 * some reason we will read the needed phantom bytes.
		 */
		reg = QCOM_SPI_READ_4(sc, QUP_INPUT_FIFO);

		/*
		 * Unpack the receive buffer based on whether we are
		 * doing 1, 2, or 4 byte transfer words.
		 */
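		/*
		 * Worked example (illustrative): a 4 byte word read back
		 * as 0xaabbccdd unpacks to rx bytes 0xaa, 0xbb, 0xcc, 0xdd
		 * in that order, mirroring the transmit packing above.
		 */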
		if (sc->state.transfer_word_size == 1) {
			if (qcom_spi_hw_read_into_rx_buf(sc, reg & 0xff))
				num_bytes++;
		} else if (sc->state.transfer_word_size == 2) {
			if (qcom_spi_hw_read_into_rx_buf(sc, (reg >> 8) & 0xff))
				num_bytes++;
			if (qcom_spi_hw_read_into_rx_buf(sc, reg & 0xff))
				num_bytes++;
		} else if (sc->state.transfer_word_size == 4) {
			if (qcom_spi_hw_read_into_rx_buf(sc, (reg >> 24) & 0xff))
				num_bytes++;
			if (qcom_spi_hw_read_into_rx_buf(sc, (reg >> 16) & 0xff))
				num_bytes++;
			if (qcom_spi_hw_read_into_rx_buf(sc, (reg >> 8) & 0xff))
				num_bytes++;
			if (qcom_spi_hw_read_into_rx_buf(sc, reg & 0xff))
				num_bytes++;
		}
	}

	QCOM_SPI_DPRINTF(sc, QCOM_SPI_DEBUG_HW_TX_FIFO,
	    "%s: read %d bytes (%d transfer words)\n",
	    __func__, num_bytes, sc->transfer.num_words);

#if 0
	/*
	 * This is a no-op for FIFO mode, it's only a thing for BLOCK
	 * transfers.
	 */
	QCOM_SPI_BARRIER_READ(sc);
	reg = QCOM_SPI_READ_4(sc, QUP_OPERATIONAL);
	if (reg & QUP_OP_MAX_INPUT_DONE_FLAG) {
		device_printf(sc->sc_dev, "%s: read complete (DONE)\n",
		    __func__);
		sc->intr.done = true;
	}
#endif

#if 0
	/*
	 * And see if we've finished the transfer and won't be getting
	 * any more.  Then treat it as done as well.
	 *
	 * In FIFO only mode we don't get a completion interrupt;
	 * we get an interrupt when the FIFO has enough data present.
	 */
	if ((sc->state.transfer_mode == QUP_IO_M_MODE_FIFO)
	    && (sc->transfer.rx_offset >= sc->transfer.rx_len)) {
		device_printf(sc->sc_dev, "%s: read complete (rxlen)\n",
		    __func__);
		sc->intr.done = true;
	}
#endif

	/*
	 * For FIFO transfers we get a /single/ result that completes
	 * the FIFO transfer.  We won't get any subsequent transfers;
	 * we'll need to schedule a new FIFO transfer.
	 */
	sc->intr.done = true;

	return (0);
}

int
qcom_spi_hw_read_pio_block(struct qcom_spi_softc *sc)
{

	/* Not yet implemented */
	return (ENXIO);
}

int
qcom_spi_hw_do_full_reset(struct qcom_spi_softc *sc)
{
	QCOM_SPI_ASSERT_LOCKED(sc);

	QCOM_SPI_WRITE_4(sc, QUP_SW_RESET, 1);
	QCOM_SPI_BARRIER_WRITE(sc);
	DELAY(100);

	return (0);
}