/*-
 * Copyright (c) 2017-2018, Rubicon Communications, LLC (Netgate)
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 */

#include <sys/cdefs.h>
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>

#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/module.h>
#include <sys/mutex.h>
#include <sys/rman.h>

#include <machine/bus.h>
#include <machine/resource.h>
#include <machine/intr.h>

#include <dev/ofw/ofw_bus.h>
#include <dev/ofw/ofw_bus_subr.h>
#include <dev/spibus/spi.h>
#include <dev/spibus/spibusvar.h>

#include <arm/mv/mvvar.h>

#include "spibus_if.h"

struct mv_spi_softc {
	device_t		sc_dev;
	struct mtx		sc_mtx;
	struct resource		*sc_mem_res;
	struct resource		*sc_irq_res;
	struct spi_command	*sc_cmd;
	bus_space_tag_t		sc_bst;
	bus_space_handle_t	sc_bsh;
	uint32_t		sc_len;
	uint32_t		sc_read;
	uint32_t		sc_flags;
	uint32_t		sc_written;
	void			*sc_intrhand;
};

#define	MV_SPI_BUSY		0x1
#define	MV_SPI_WRITE(_sc, _off, _val)		\
	bus_space_write_4((_sc)->sc_bst, (_sc)->sc_bsh, (_off), (_val))
#define	MV_SPI_READ(_sc, _off)			\
	bus_space_read_4((_sc)->sc_bst, (_sc)->sc_bsh, (_off))
#define	MV_SPI_LOCK(_sc)	mtx_lock(&(_sc)->sc_mtx)
#define	MV_SPI_UNLOCK(_sc)	mtx_unlock(&(_sc)->sc_mtx)

#define	MV_SPI_CONTROL			0
#define	MV_SPI_CTRL_CS_MASK		7
#define	MV_SPI_CTRL_CS_SHIFT		2
#define	MV_SPI_CTRL_SMEMREADY		(1 << 1)
#define	MV_SPI_CTRL_CS_ACTIVE		(1 << 0)
#define	MV_SPI_CONF			0x4
#define	MV_SPI_CONF_MODE_SHIFT		12
#define	MV_SPI_CONF_MODE_MASK		(3 << MV_SPI_CONF_MODE_SHIFT)
#define	MV_SPI_CONF_BYTELEN		(1 << 5)
#define	MV_SPI_CONF_CLOCK_SPR_MASK	0xf
#define	MV_SPI_CONF_CLOCK_SPPR_MASK	1
#define	MV_SPI_CONF_CLOCK_SPPR_SHIFT	4
#define	MV_SPI_CONF_CLOCK_SPPRHI_MASK	3
#define	MV_SPI_CONF_CLOCK_SPPRHI_SHIFT	6
#define	MV_SPI_CONF_CLOCK_MASK						\
	((MV_SPI_CONF_CLOCK_SPPRHI_MASK << MV_SPI_CONF_CLOCK_SPPRHI_SHIFT) | \
	(MV_SPI_CONF_CLOCK_SPPR_MASK << MV_SPI_CONF_CLOCK_SPPR_SHIFT) |	\
	MV_SPI_CONF_CLOCK_SPR_MASK)
#define	MV_SPI_DATAOUT			0x8
#define	MV_SPI_DATAIN			0xc
#define	MV_SPI_INTR_STAT		0x10
#define	MV_SPI_INTR_MASK		0x14
#define	MV_SPI_INTR_SMEMREADY		(1 << 0)
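
/*
 * The SPR and SPPR fields above form the clock prescaler of the SPI
 * Interface Configuration register.  Based on the divider computed by
 * mv_spi_psc_calc() below, the resulting SPI clock is assumed to be
 * TCLK / (SPR * 2^SPPR); consult the Armada 38x documentation for the
 * authoritative field layout.
 */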

static struct ofw_compat_data compat_data[] = {
	{"marvell,armada-380-spi",	1},
	{NULL,				0}
};

static void mv_spi_intr(void *);

static int
mv_spi_probe(device_t dev)
{

	if (!ofw_bus_status_okay(dev))
		return (ENXIO);
	if (ofw_bus_search_compatible(dev, compat_data)->ocd_data == 0)
		return (ENXIO);

	device_set_desc(dev, "Marvell SPI controller");

	return (BUS_PROBE_DEFAULT);
}

static int
mv_spi_attach(device_t dev)
{
	struct mv_spi_softc *sc;
	int rid;
	uint32_t reg;

	sc = device_get_softc(dev);
	sc->sc_dev = dev;

	rid = 0;
	sc->sc_mem_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
	    RF_ACTIVE);
	if (!sc->sc_mem_res) {
		device_printf(dev, "cannot allocate memory window\n");
		return (ENXIO);
	}

	sc->sc_bst = rman_get_bustag(sc->sc_mem_res);
	sc->sc_bsh = rman_get_bushandle(sc->sc_mem_res);

	rid = 0;
	sc->sc_irq_res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
	    RF_ACTIVE);
	if (!sc->sc_irq_res) {
		bus_release_resource(dev, SYS_RES_MEMORY, 0, sc->sc_mem_res);
		device_printf(dev, "cannot allocate interrupt\n");
		return (ENXIO);
	}

	/* Deactivate the bus - just in case... */
	reg = MV_SPI_READ(sc, MV_SPI_CONTROL);
	MV_SPI_WRITE(sc, MV_SPI_CONTROL, reg & ~MV_SPI_CTRL_CS_ACTIVE);

	/* Disable the two-byte FIFO. */
	reg = MV_SPI_READ(sc, MV_SPI_CONF);
	MV_SPI_WRITE(sc, MV_SPI_CONF, reg & ~MV_SPI_CONF_BYTELEN);

	/* Clear and disable interrupts. */
	MV_SPI_WRITE(sc, MV_SPI_INTR_MASK, 0);
	MV_SPI_WRITE(sc, MV_SPI_INTR_STAT, 0);

	/* Hook up our interrupt handler. */
	if (bus_setup_intr(dev, sc->sc_irq_res, INTR_TYPE_MISC | INTR_MPSAFE,
	    NULL, mv_spi_intr, sc, &sc->sc_intrhand)) {
		bus_release_resource(dev, SYS_RES_IRQ, 0, sc->sc_irq_res);
		bus_release_resource(dev, SYS_RES_MEMORY, 0, sc->sc_mem_res);
		device_printf(dev, "cannot setup the interrupt handler\n");
		return (ENXIO);
	}

	mtx_init(&sc->sc_mtx, "mv_spi", NULL, MTX_DEF);

	device_add_child(dev, "spibus", -1);

	/* Probe and attach the spibus when interrupts are available. */
	return (bus_delayed_attach_children(dev));
}

static int
mv_spi_detach(device_t dev)
{
	struct mv_spi_softc *sc;

	bus_generic_detach(dev);

	sc = device_get_softc(dev);
	mtx_destroy(&sc->sc_mtx);
	if (sc->sc_intrhand)
		bus_teardown_intr(dev, sc->sc_irq_res, sc->sc_intrhand);
	if (sc->sc_irq_res)
		bus_release_resource(dev, SYS_RES_IRQ, 0, sc->sc_irq_res);
	if (sc->sc_mem_res)
		bus_release_resource(dev, SYS_RES_MEMORY, 0, sc->sc_mem_res);

	return (0);
}
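
/*
 * The RX/TX helpers below consume the command buffer first and then the
 * data buffer, keeping a single running index per direction.  This is why
 * mv_spi_transfer() asserts that the TX and RX command (and data) sizes
 * match: every byte clocked out has a corresponding byte clocked in.
 */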
static __inline void
mv_spi_rx_byte(struct mv_spi_softc *sc)
{
	struct spi_command *cmd;
	uint32_t read;
	uint8_t *p;

	cmd = sc->sc_cmd;
	p = (uint8_t *)cmd->rx_cmd;
	read = sc->sc_read++;
	if (read >= cmd->rx_cmd_sz) {
		p = (uint8_t *)cmd->rx_data;
		read -= cmd->rx_cmd_sz;
	}
	p[read] = MV_SPI_READ(sc, MV_SPI_DATAIN) & 0xff;
}

static __inline void
mv_spi_tx_byte(struct mv_spi_softc *sc)
{
	struct spi_command *cmd;
	uint32_t written;
	uint8_t *p;

	cmd = sc->sc_cmd;
	p = (uint8_t *)cmd->tx_cmd;
	written = sc->sc_written++;
	if (written >= cmd->tx_cmd_sz) {
		p = (uint8_t *)cmd->tx_data;
		written -= cmd->tx_cmd_sz;
	}
	MV_SPI_WRITE(sc, MV_SPI_DATAOUT, p[written]);
}

static void
mv_spi_intr(void *arg)
{
	struct mv_spi_softc *sc;

	sc = (struct mv_spi_softc *)arg;
	MV_SPI_LOCK(sc);

	/* Filter stray interrupts. */
	if ((sc->sc_flags & MV_SPI_BUSY) == 0) {
		MV_SPI_UNLOCK(sc);
		return;
	}

	/* RX */
	mv_spi_rx_byte(sc);

	/* TX */
	mv_spi_tx_byte(sc);

	/* Check for end of transfer. */
	if (sc->sc_written == sc->sc_len && sc->sc_read == sc->sc_len)
		wakeup(sc->sc_dev);

	MV_SPI_UNLOCK(sc);
}

static int
mv_spi_psc_calc(uint32_t clock, uint32_t *spr, uint32_t *sppr)
{
	uint32_t divider, tclk;

	tclk = get_tclk_armada38x();
	for (*spr = 2; *spr <= 15; (*spr)++) {
		for (*sppr = 0; *sppr <= 7; (*sppr)++) {
			divider = *spr * (1 << *sppr);
			if (tclk / divider <= clock)
				return (0);
		}
	}

	return (EINVAL);
}
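
/*
 * Transfers are driven by polling MV_SPI_CTRL_SMEMREADY one byte at a
 * time.  The interrupt mask is never enabled in this driver, so
 * mv_spi_intr() is only exercised if MV_SPI_INTR_SMEMREADY is unmasked
 * by other means.
 */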
static int
mv_spi_transfer(device_t dev, device_t child, struct spi_command *cmd)
{
	struct mv_spi_softc *sc;
	uint32_t clock, cs, mode, reg, spr, sppr;
	int resid, timeout;

	KASSERT(cmd->tx_cmd_sz == cmd->rx_cmd_sz,
	    ("TX/RX command sizes should be equal"));
	KASSERT(cmd->tx_data_sz == cmd->rx_data_sz,
	    ("TX/RX data sizes should be equal"));

	/* Get the proper chip select, mode and clock for this transfer. */
	spibus_get_cs(child, &cs);
	cs &= ~SPIBUS_CS_HIGH;
	spibus_get_mode(child, &mode);
	if (mode > 3) {
		device_printf(dev,
		    "Invalid mode %u requested by %s\n", mode,
		    device_get_nameunit(child));
		return (EINVAL);
	}
	spibus_get_clock(child, &clock);
	if (clock == 0 || mv_spi_psc_calc(clock, &spr, &sppr) != 0) {
		device_printf(dev,
		    "Invalid clock %uHz requested by %s\n", clock,
		    device_get_nameunit(child));
		return (EINVAL);
	}

	sc = device_get_softc(dev);
	MV_SPI_LOCK(sc);

	/* Wait until the controller is free. */
	while (sc->sc_flags & MV_SPI_BUSY)
		mtx_sleep(dev, &sc->sc_mtx, 0, "mv_spi", 0);

	/* Now we have control over the SPI controller. */
	sc->sc_flags = MV_SPI_BUSY;

	/* Save a pointer to the SPI command. */
	sc->sc_cmd = cmd;
	sc->sc_read = 0;
	sc->sc_written = 0;
	sc->sc_len = cmd->tx_cmd_sz + cmd->tx_data_sz;

	/* Set SPI Mode and Clock. */
	reg = MV_SPI_READ(sc, MV_SPI_CONF);
	reg &= ~(MV_SPI_CONF_MODE_MASK | MV_SPI_CONF_CLOCK_MASK);
	reg |= mode << MV_SPI_CONF_MODE_SHIFT;
	reg |= spr & MV_SPI_CONF_CLOCK_SPR_MASK;
	reg |= (sppr & MV_SPI_CONF_CLOCK_SPPR_MASK) <<
	    MV_SPI_CONF_CLOCK_SPPR_SHIFT;
	/* SPPRHI is assumed to hold the upper bits of SPPR (SPPR[2:1]). */
	reg |= ((sppr >> 1) & MV_SPI_CONF_CLOCK_SPPRHI_MASK) <<
	    MV_SPI_CONF_CLOCK_SPPRHI_SHIFT;
	MV_SPI_WRITE(sc, MV_SPI_CONF, reg);

	/* Set CS number and assert CS. */
	reg = (cs & MV_SPI_CTRL_CS_MASK) << MV_SPI_CTRL_CS_SHIFT;
	MV_SPI_WRITE(sc, MV_SPI_CONTROL, reg);
	reg = MV_SPI_READ(sc, MV_SPI_CONTROL);
	MV_SPI_WRITE(sc, MV_SPI_CONTROL, reg | MV_SPI_CTRL_CS_ACTIVE);

	while ((resid = sc->sc_len - sc->sc_written) > 0) {
		MV_SPI_WRITE(sc, MV_SPI_INTR_STAT, 0);

		/*
		 * Write to start the transmission and read the byte
		 * back when ready.
		 */
		mv_spi_tx_byte(sc);
		timeout = 1000;
		while (--timeout > 0) {
			reg = MV_SPI_READ(sc, MV_SPI_CONTROL);
			if (reg & MV_SPI_CTRL_SMEMREADY)
				break;
			DELAY(1);
		}
		if (timeout == 0)
			break;
		mv_spi_rx_byte(sc);
	}

	/* Stop the controller. */
	reg = MV_SPI_READ(sc, MV_SPI_CONTROL);
	MV_SPI_WRITE(sc, MV_SPI_CONTROL, reg & ~MV_SPI_CTRL_CS_ACTIVE);
	MV_SPI_WRITE(sc, MV_SPI_INTR_MASK, 0);
	MV_SPI_WRITE(sc, MV_SPI_INTR_STAT, 0);

	/* Release the controller and wakeup the next thread waiting for it. */
	sc->sc_flags = 0;
	wakeup_one(dev);
	MV_SPI_UNLOCK(sc);

	/*
	 * Check for a transfer timeout.  The SPI controller doesn't
	 * report errors.
	 */
	return ((timeout == 0) ? EIO : 0);
}

static phandle_t
mv_spi_get_node(device_t bus, device_t dev)
{

	return (ofw_bus_get_node(bus));
}

static device_method_t mv_spi_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		mv_spi_probe),
	DEVMETHOD(device_attach,	mv_spi_attach),
	DEVMETHOD(device_detach,	mv_spi_detach),

	/* SPI interface */
	DEVMETHOD(spibus_transfer,	mv_spi_transfer),

	/* ofw_bus interface */
	DEVMETHOD(ofw_bus_get_node,	mv_spi_get_node),

	DEVMETHOD_END
};

static driver_t mv_spi_driver = {
	"spi",
	mv_spi_methods,
	sizeof(struct mv_spi_softc),
};

DRIVER_MODULE(mv_spi, simplebus, mv_spi_driver, 0, 0);