/*-
 * Copyright (c) 2017-2018, Rubicon Communications, LLC (Netgate)
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>

#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/module.h>
#include <sys/mutex.h>
#include <sys/rman.h>

#include <machine/bus.h>
#include <machine/resource.h>
#include <machine/intr.h>

#include <dev/ofw/ofw_bus.h>
#include <dev/ofw/ofw_bus_subr.h>
#include <dev/spibus/spi.h>
#include <dev/spibus/spibusvar.h>

#include <arm/mv/mvvar.h>

#include "spibus_if.h"

struct mv_spi_softc {
	device_t		sc_dev;
	struct mtx		sc_mtx;
	struct resource		*sc_mem_res;
	struct resource		*sc_irq_res;
	struct spi_command	*sc_cmd;
	bus_space_tag_t		sc_bst;
	bus_space_handle_t	sc_bsh;
	uint32_t		sc_len;
	uint32_t		sc_read;
	uint32_t		sc_flags;
	uint32_t		sc_written;
	void			*sc_intrhand;
};

#define	MV_SPI_BUSY		0x1
#define	MV_SPI_WRITE(_sc, _off, _val)		\
    bus_space_write_4((_sc)->sc_bst, (_sc)->sc_bsh, (_off), (_val))
#define	MV_SPI_READ(_sc, _off)			\
    bus_space_read_4((_sc)->sc_bst, (_sc)->sc_bsh, (_off))
#define	MV_SPI_LOCK(_sc)	mtx_lock(&(_sc)->sc_mtx)
#define	MV_SPI_UNLOCK(_sc)	mtx_unlock(&(_sc)->sc_mtx)

#define	MV_SPI_CONTROL				0
#define	MV_SPI_CTRL_CS_MASK			7
#define	MV_SPI_CTRL_CS_SHIFT			2
#define	MV_SPI_CTRL_SMEMREADY			(1 << 1)
#define	MV_SPI_CTRL_CS_ACTIVE			(1 << 0)
#define	MV_SPI_CONF				0x4
#define	MV_SPI_CONF_MODE_SHIFT			12
#define	MV_SPI_CONF_MODE_MASK			(3 << MV_SPI_CONF_MODE_SHIFT)
#define	MV_SPI_CONF_BYTELEN			(1 << 5)
#define	MV_SPI_CONF_CLOCK_SPR_MASK		0xf
#define	MV_SPI_CONF_CLOCK_SPPR_MASK		1
#define	MV_SPI_CONF_CLOCK_SPPR_SHIFT		4
#define	MV_SPI_CONF_CLOCK_SPPRHI_MASK		3
#define	MV_SPI_CONF_CLOCK_SPPRHI_SHIFT		6
#define	MV_SPI_CONF_CLOCK_MASK					\
    ((MV_SPI_CONF_CLOCK_SPPRHI_MASK << MV_SPI_CONF_CLOCK_SPPRHI_SHIFT) | \
    (MV_SPI_CONF_CLOCK_SPPR_MASK << MV_SPI_CONF_CLOCK_SPPR_SHIFT) |	\
    MV_SPI_CONF_CLOCK_SPR_MASK)
#define	MV_SPI_DATAOUT				0x8
#define	MV_SPI_DATAIN				0xc
#define	MV_SPI_INTR_STAT			0x10
#define	MV_SPI_INTR_MASK			0x14
#define	MV_SPI_INTR_SMEMREADY			(1 << 0)

static struct ofw_compat_data compat_data[] = {
	{"marvell,armada-380-spi",	1},
	{NULL,				0}
};

static void mv_spi_intr(void *);

static int
mv_spi_probe(device_t dev)
{

	if (!ofw_bus_status_okay(dev))
		return (ENXIO);
	if (ofw_bus_search_compatible(dev, compat_data)->ocd_data == 0)
		return (ENXIO);

	device_set_desc(dev, "Marvell SPI controller");

	return (BUS_PROBE_DEFAULT);
}

static int
mv_spi_attach(device_t dev)
{
	struct mv_spi_softc *sc;
	int rid;
	uint32_t reg;

	sc = device_get_softc(dev);
	sc->sc_dev = dev;

	rid = 0;
	sc->sc_mem_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
	    RF_ACTIVE);
	if (!sc->sc_mem_res) {
		device_printf(dev, "cannot allocate memory window\n");
		return (ENXIO);
	}

	sc->sc_bst = rman_get_bustag(sc->sc_mem_res);
	sc->sc_bsh = rman_get_bushandle(sc->sc_mem_res);

	rid = 0;
	sc->sc_irq_res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
	    RF_ACTIVE);
	if (!sc->sc_irq_res) {
		bus_release_resource(dev, SYS_RES_MEMORY, 0, sc->sc_mem_res);
		device_printf(dev, "cannot allocate interrupt\n");
		return (ENXIO);
	}

	/* Deactivate the bus - just in case... */
	reg = MV_SPI_READ(sc, MV_SPI_CONTROL);
	MV_SPI_WRITE(sc, MV_SPI_CONTROL, reg & ~MV_SPI_CTRL_CS_ACTIVE);

	/* Disable the two-byte FIFO. */
	reg = MV_SPI_READ(sc, MV_SPI_CONF);
	MV_SPI_WRITE(sc, MV_SPI_CONF, reg & ~MV_SPI_CONF_BYTELEN);

	/* Clear and disable interrupts. */
	MV_SPI_WRITE(sc, MV_SPI_INTR_MASK, 0);
	MV_SPI_WRITE(sc, MV_SPI_INTR_STAT, 0);

	/* Hook up our interrupt handler. */
	if (bus_setup_intr(dev, sc->sc_irq_res, INTR_TYPE_MISC | INTR_MPSAFE,
	    NULL, mv_spi_intr, sc, &sc->sc_intrhand)) {
		bus_release_resource(dev, SYS_RES_IRQ, 0, sc->sc_irq_res);
		bus_release_resource(dev, SYS_RES_MEMORY, 0, sc->sc_mem_res);
		device_printf(dev, "cannot setup the interrupt handler\n");
		return (ENXIO);
	}

	mtx_init(&sc->sc_mtx, "mv_spi", NULL, MTX_DEF);

	device_add_child(dev, "spibus", -1);

	/* Probe and attach the spibus when interrupts are available. */
	return (bus_delayed_attach_children(dev));
}

static int
mv_spi_detach(device_t dev)
{
	struct mv_spi_softc *sc;

	bus_generic_detach(dev);

	sc = device_get_softc(dev);
	mtx_destroy(&sc->sc_mtx);
	if (sc->sc_intrhand)
		bus_teardown_intr(dev, sc->sc_irq_res, sc->sc_intrhand);
	if (sc->sc_irq_res)
		bus_release_resource(dev, SYS_RES_IRQ, 0, sc->sc_irq_res);
	if (sc->sc_mem_res)
		bus_release_resource(dev, SYS_RES_MEMORY, 0, sc->sc_mem_res);

	return (0);
}

/*
 * Read the next incoming byte into the command buffer or, once the
 * command bytes have been consumed, into the data buffer.
 */
static __inline void
mv_spi_rx_byte(struct mv_spi_softc *sc)
{
	struct spi_command *cmd;
	uint32_t read;
	uint8_t *p;

	cmd = sc->sc_cmd;
	p = (uint8_t *)cmd->rx_cmd;
	read = sc->sc_read++;
	if (read >= cmd->rx_cmd_sz) {
		p = (uint8_t *)cmd->rx_data;
		read -= cmd->rx_cmd_sz;
	}
	p[read] = MV_SPI_READ(sc, MV_SPI_DATAIN) & 0xff;
}

/*
 * Write the next outgoing byte from the command buffer or, once the
 * command bytes have been sent, from the data buffer.
 */
static __inline void
mv_spi_tx_byte(struct mv_spi_softc *sc)
{
	struct spi_command *cmd;
	uint32_t written;
	uint8_t *p;

	cmd = sc->sc_cmd;
	p = (uint8_t *)cmd->tx_cmd;
	written = sc->sc_written++;
	if (written >= cmd->tx_cmd_sz) {
		p = (uint8_t *)cmd->tx_data;
		written -= cmd->tx_cmd_sz;
	}
	MV_SPI_WRITE(sc, MV_SPI_DATAOUT, p[written]);
}

static void
mv_spi_intr(void *arg)
{
	struct mv_spi_softc *sc;

	sc = (struct mv_spi_softc *)arg;
	MV_SPI_LOCK(sc);

	/* Filter stray interrupts. */
	if ((sc->sc_flags & MV_SPI_BUSY) == 0) {
		MV_SPI_UNLOCK(sc);
		return;
	}

	/* RX */
	mv_spi_rx_byte(sc);

	/* TX */
	mv_spi_tx_byte(sc);

	/* Check for end of transfer. */
	if (sc->sc_written == sc->sc_len && sc->sc_read == sc->sc_len)
		wakeup(sc->sc_dev);

	MV_SPI_UNLOCK(sc);
}

/*
 * Find SPR and SPPR prescaler values such that TCLK / (SPR * 2^SPPR)
 * does not exceed the requested clock.
 */
static int
mv_spi_psc_calc(uint32_t clock, uint32_t *spr, uint32_t *sppr)
{
	uint32_t divider, tclk;

	tclk = get_tclk_armada38x();
	for (*spr = 2; *spr <= 15; (*spr)++) {
		for (*sppr = 0; *sppr <= 7; (*sppr)++) {
			divider = *spr * (1 << *sppr);
			if (tclk / divider <= clock)
				return (0);
		}
	}

	return (EINVAL);
}

static int
mv_spi_transfer(device_t dev, device_t child, struct spi_command *cmd)
{
	struct mv_spi_softc *sc;
	uint32_t clock, cs, mode, reg, spr, sppr;
	int resid, timeout;

	KASSERT(cmd->tx_cmd_sz == cmd->rx_cmd_sz,
	    ("TX/RX command sizes should be equal"));
	KASSERT(cmd->tx_data_sz == cmd->rx_data_sz,
	    ("TX/RX data sizes should be equal"));

	/* Get the proper chip select, mode and clock for this transfer. */
	spibus_get_cs(child, &cs);
	cs &= ~SPIBUS_CS_HIGH;
	spibus_get_mode(child, &mode);
	if (mode > 3) {
		device_printf(dev,
		    "Invalid mode %u requested by %s\n", mode,
		    device_get_nameunit(child));
		return (EINVAL);
	}
	spibus_get_clock(child, &clock);
	if (clock == 0 || mv_spi_psc_calc(clock, &spr, &sppr) != 0) {
		device_printf(dev,
		    "Invalid clock %uHz requested by %s\n", clock,
		    device_get_nameunit(child));
		return (EINVAL);
	}

	sc = device_get_softc(dev);
	MV_SPI_LOCK(sc);

	/* Wait until the controller is free. */
	while (sc->sc_flags & MV_SPI_BUSY)
		mtx_sleep(dev, &sc->sc_mtx, 0, "mv_spi", 0);

	/* Now we have control over the SPI controller. */
	sc->sc_flags = MV_SPI_BUSY;

	/* Save a pointer to the SPI command. */
	sc->sc_cmd = cmd;
	sc->sc_read = 0;
	sc->sc_written = 0;
	sc->sc_len = cmd->tx_cmd_sz + cmd->tx_data_sz;

	/* Set SPI Mode and Clock. */
	reg = MV_SPI_READ(sc, MV_SPI_CONF);
	reg &= ~(MV_SPI_CONF_MODE_MASK | MV_SPI_CONF_CLOCK_MASK);
	reg |= mode << MV_SPI_CONF_MODE_SHIFT;
	reg |= spr & MV_SPI_CONF_CLOCK_SPR_MASK;
	reg |= (sppr & MV_SPI_CONF_CLOCK_SPPR_MASK) <<
	    MV_SPI_CONF_CLOCK_SPPR_SHIFT;
	reg |= ((sppr >> 1) & MV_SPI_CONF_CLOCK_SPPRHI_MASK) <<
	    MV_SPI_CONF_CLOCK_SPPRHI_SHIFT;
	MV_SPI_WRITE(sc, MV_SPI_CONF, reg);

	/* Set CS number and assert CS. */
	reg = (cs & MV_SPI_CTRL_CS_MASK) << MV_SPI_CTRL_CS_SHIFT;
	MV_SPI_WRITE(sc, MV_SPI_CONTROL, reg);
	reg = MV_SPI_READ(sc, MV_SPI_CONTROL);
	MV_SPI_WRITE(sc, MV_SPI_CONTROL, reg | MV_SPI_CTRL_CS_ACTIVE);

	while ((resid = sc->sc_len - sc->sc_written) > 0) {
		MV_SPI_WRITE(sc, MV_SPI_INTR_STAT, 0);

		/*
		 * Write to start the transmission and read the byte
		 * back when ready.
		 */
		mv_spi_tx_byte(sc);
		timeout = 1000;
		while (--timeout > 0) {
			reg = MV_SPI_READ(sc, MV_SPI_CONTROL);
			if (reg & MV_SPI_CTRL_SMEMREADY)
				break;
			DELAY(1);
		}
		if (timeout == 0)
			break;
		mv_spi_rx_byte(sc);
	}

	/* Stop the controller. */
	reg = MV_SPI_READ(sc, MV_SPI_CONTROL);
	MV_SPI_WRITE(sc, MV_SPI_CONTROL, reg & ~MV_SPI_CTRL_CS_ACTIVE);
	MV_SPI_WRITE(sc, MV_SPI_INTR_MASK, 0);
	MV_SPI_WRITE(sc, MV_SPI_INTR_STAT, 0);

	/* Release the controller and wake up the next thread waiting for it. */
	sc->sc_flags = 0;
	wakeup_one(dev);
	MV_SPI_UNLOCK(sc);

	/*
	 * Check for transfer timeout.  The SPI controller doesn't
	 * return errors.
	 */
	return ((timeout == 0) ? EIO : 0);
}

static phandle_t
mv_spi_get_node(device_t bus, device_t dev)
{

	return (ofw_bus_get_node(bus));
}

static device_method_t mv_spi_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		mv_spi_probe),
	DEVMETHOD(device_attach,	mv_spi_attach),
	DEVMETHOD(device_detach,	mv_spi_detach),

	/* SPI interface */
	DEVMETHOD(spibus_transfer,	mv_spi_transfer),

	/* ofw_bus interface */
	DEVMETHOD(ofw_bus_get_node,	mv_spi_get_node),

	DEVMETHOD_END
};

static driver_t mv_spi_driver = {
	"spi",
	mv_spi_methods,
	sizeof(struct mv_spi_softc),
};

DRIVER_MODULE(mv_spi, simplebus, mv_spi_driver, 0, 0);