/*-
 * Copyright (c) 2015 Brian Fundakowski Feldman.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
#include "opt_platform.h"
#include "opt_spi.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/conf.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mman.h>
#include <sys/mutex.h>
#include <sys/module.h>
#include <sys/proc.h>
#include <sys/rwlock.h>
#include <sys/spigenio.h>
#include <sys/types.h>

#include <vm/vm.h>
#include <vm/vm_extern.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pager.h>

#include <dev/spibus/spi.h>
#include <dev/spibus/spibusvar.h>

#ifdef FDT
#include <dev/ofw/ofw_bus_subr.h>

static struct ofw_compat_data compat_data[] = {
	{"freebsd,spigen", true},
	{NULL, false}
};

#endif

#include "spibus_if.h"

struct spigen_softc {
	device_t	sc_dev;
	struct cdev	*sc_cdev;
#ifdef SPIGEN_LEGACY_CDEVNAME
	struct cdev	*sc_adev;	/* alias device */
#endif
	struct mtx	sc_mtx;
};

struct spigen_mmap {
	vm_object_t	bufobj;
	vm_offset_t	kvaddr;
	size_t		bufsize;
};

static int
spigen_probe(device_t dev)
{
	int rv;

	/*
	 * By default we only bid to attach if specifically added by our parent
	 * (usually via hint.spigen.#.at=busname).  On FDT systems we bid as
	 * the default driver based on being configured in the FDT data.
	 */
	rv = BUS_PROBE_NOWILDCARD;

#ifdef FDT
	if (ofw_bus_status_okay(dev) &&
	    ofw_bus_search_compatible(dev, compat_data)->ocd_data)
		rv = BUS_PROBE_DEFAULT;
#endif

	device_set_desc(dev, "SPI Generic IO");

	return (rv);
}
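
/*
 * Illustrative only (not part of the driver): on a non-FDT system an
 * instance is typically added with device hints naming the parent bus and
 * chip select; the unit numbers and clock value below are assumptions and
 * must match the real wiring.
 *
 *	hint.spigen.0.at="spibus0"
 *	hint.spigen.0.cs=0
 *	hint.spigen.0.clock=500000
 *
 * On FDT systems, a child node of the SPI controller carrying
 * compatible = "freebsd,spigen" makes the probe above bid as the default
 * driver instead.
 */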

static int spigen_open(struct cdev *, int, int, struct thread *);
static int spigen_ioctl(struct cdev *, u_long, caddr_t, int, struct thread *);
static int spigen_close(struct cdev *, int, int, struct thread *);
static d_mmap_single_t spigen_mmap_single;

static struct cdevsw spigen_cdevsw = {
	.d_version =	D_VERSION,
	.d_name =	"spigen",
	.d_open =	spigen_open,
	.d_ioctl =	spigen_ioctl,
	.d_mmap_single = spigen_mmap_single,
	.d_close =	spigen_close
};

static int
spigen_attach(device_t dev)
{
	struct spigen_softc *sc;
	const int unit = device_get_unit(dev);
	int cs, res;
	struct make_dev_args mda;

	spibus_get_cs(dev, &cs);
	cs &= ~SPIBUS_CS_HIGH; /* trim 'cs high' bit */

	sc = device_get_softc(dev);
	sc->sc_dev = dev;

	mtx_init(&sc->sc_mtx, device_get_nameunit(dev), NULL, MTX_DEF);

	make_dev_args_init(&mda);
	mda.mda_flags = MAKEDEV_WAITOK;
	mda.mda_devsw = &spigen_cdevsw;
	mda.mda_cr = NULL;
	mda.mda_uid = UID_ROOT;
	mda.mda_gid = GID_OPERATOR;
	mda.mda_mode = 0660;
	mda.mda_unit = unit;
	mda.mda_si_drv1 = dev;

	res = make_dev_s(&mda, &(sc->sc_cdev), "spigen%d.%d",
	    device_get_unit(device_get_parent(dev)), cs);
	if (res) {
		return (res);
	}

#ifdef SPIGEN_LEGACY_CDEVNAME
	res = make_dev_alias_p(0, &sc->sc_adev, sc->sc_cdev, "spigen%d", unit);
	if (res) {
		if (sc->sc_cdev) {
			destroy_dev(sc->sc_cdev);
			sc->sc_cdev = NULL;
		}
		return (res);
	}
#endif

	return (0);
}

static int
spigen_open(struct cdev *cdev, int oflags, int devtype, struct thread *td)
{
	device_t dev;
	struct spigen_softc *sc;

	dev = cdev->si_drv1;
	sc = device_get_softc(dev);

	mtx_lock(&sc->sc_mtx);
	device_busy(sc->sc_dev);
	mtx_unlock(&sc->sc_mtx);

	return (0);
}

static int
spigen_transfer(struct cdev *cdev, struct spigen_transfer *st)
{
	struct spi_command transfer = SPI_COMMAND_INITIALIZER;
	device_t dev = cdev->si_drv1;
	int error = 0;

#if 0
	device_printf(dev, "cmd %p %u data %p %u\n", st->st_command.iov_base,
	    st->st_command.iov_len, st->st_data.iov_base, st->st_data.iov_len);
#endif

	if (st->st_command.iov_len == 0)
		return (EINVAL);

	transfer.tx_cmd = transfer.rx_cmd = malloc(st->st_command.iov_len,
	    M_DEVBUF, M_WAITOK);
	if (st->st_data.iov_len > 0) {
		transfer.tx_data = transfer.rx_data = malloc(st->st_data.iov_len,
		    M_DEVBUF, M_WAITOK);
	}
	else
		transfer.tx_data = transfer.rx_data = NULL;

	error = copyin(st->st_command.iov_base, transfer.tx_cmd,
	    transfer.tx_cmd_sz = transfer.rx_cmd_sz = st->st_command.iov_len);
	if ((error == 0) && (st->st_data.iov_len > 0))
		error = copyin(st->st_data.iov_base, transfer.tx_data,
		    transfer.tx_data_sz = transfer.rx_data_sz =
			st->st_data.iov_len);
	if (error == 0)
		error = SPIBUS_TRANSFER(device_get_parent(dev), dev, &transfer);
	if (error == 0) {
		error = copyout(transfer.rx_cmd, st->st_command.iov_base,
		    transfer.rx_cmd_sz);
		if ((error == 0) && (st->st_data.iov_len > 0))
			error = copyout(transfer.rx_data, st->st_data.iov_base,
			    transfer.rx_data_sz);
	}

	free(transfer.tx_cmd, M_DEVBUF);
	free(transfer.tx_data, M_DEVBUF);
	return (error);
}
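
/*
 * Illustrative userland sketch (not part of the driver) of the
 * SPIGENIOC_TRANSFER path implemented above.  The device path and the
 * command byte are assumptions; the buffers are full-duplex, so both are
 * overwritten with whatever the slave shifted back once the ioctl returns.
 *
 *	#include <sys/types.h>
 *	#include <sys/ioctl.h>
 *	#include <sys/spigenio.h>
 *	#include <fcntl.h>
 *
 *	uint8_t cmd[1] = { 0x9f };	// example command byte
 *	uint8_t data[3] = { 0 };	// shifted out as zeros, reply lands here
 *	struct spigen_transfer st = {
 *		.st_command = { .iov_base = cmd,  .iov_len = sizeof(cmd) },
 *		.st_data    = { .iov_base = data, .iov_len = sizeof(data) },
 *	};
 *	int fd = open("/dev/spigen0.0", O_RDWR);
 *	if (fd >= 0 && ioctl(fd, SPIGENIOC_TRANSFER, &st) == 0)
 *		;	// data[] now holds the bytes clocked in during the transfer
 */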

static int
spigen_transfer_mmapped(struct cdev *cdev, struct spigen_transfer_mmapped *stm)
{
	struct spi_command transfer = SPI_COMMAND_INITIALIZER;
	device_t dev = cdev->si_drv1;
	struct spigen_mmap *mmap;
	int error;

	if ((error = devfs_get_cdevpriv((void **)&mmap)) != 0)
		return (error);

	if (mmap->bufsize < stm->stm_command_length + stm->stm_data_length)
		return (E2BIG);

	transfer.tx_cmd = transfer.rx_cmd = (void *)((uintptr_t)mmap->kvaddr);
	transfer.tx_cmd_sz = transfer.rx_cmd_sz = stm->stm_command_length;
	transfer.tx_data = transfer.rx_data =
	    (void *)((uintptr_t)mmap->kvaddr + stm->stm_command_length);
	transfer.tx_data_sz = transfer.rx_data_sz = stm->stm_data_length;
	error = SPIBUS_TRANSFER(device_get_parent(dev), dev, &transfer);

	return (error);
}

static int
spigen_ioctl(struct cdev *cdev, u_long cmd, caddr_t data, int fflag,
    struct thread *td)
{
	device_t dev = cdev->si_drv1;
	int error;

	switch (cmd) {
	case SPIGENIOC_TRANSFER:
		error = spigen_transfer(cdev, (struct spigen_transfer *)data);
		break;
	case SPIGENIOC_TRANSFER_MMAPPED:
		error = spigen_transfer_mmapped(cdev,
		    (struct spigen_transfer_mmapped *)data);
		break;
	case SPIGENIOC_GET_CLOCK_SPEED:
		error = spibus_get_clock(dev, (uint32_t *)data);
		break;
	case SPIGENIOC_SET_CLOCK_SPEED:
		error = spibus_set_clock(dev, *(uint32_t *)data);
		break;
	case SPIGENIOC_GET_SPI_MODE:
		error = spibus_get_mode(dev, (uint32_t *)data);
		break;
	case SPIGENIOC_SET_SPI_MODE:
		error = spibus_set_mode(dev, *(uint32_t *)data);
		break;
	default:
		error = ENOTTY;
		break;
	}
	return (error);
}

static void
spigen_mmap_cleanup(void *arg)
{
	struct spigen_mmap *mmap = arg;

	if (mmap->kvaddr != 0)
		pmap_qremove(mmap->kvaddr, mmap->bufsize / PAGE_SIZE);
	if (mmap->bufobj != NULL)
		vm_object_deallocate(mmap->bufobj);
	free(mmap, M_DEVBUF);
}

static int
spigen_mmap_single(struct cdev *cdev, vm_ooffset_t *offset,
    vm_size_t size, struct vm_object **object, int nprot)
{
	struct spigen_mmap *mmap;
	vm_page_t *m;
	size_t n, pages;
	int error;

	if (size == 0 ||
	    (nprot & (PROT_EXEC | PROT_READ | PROT_WRITE))
	    != (PROT_READ | PROT_WRITE))
		return (EINVAL);
	size = roundup2(size, PAGE_SIZE);
	pages = size / PAGE_SIZE;

	if (devfs_get_cdevpriv((void **)&mmap) == 0)
		return (EBUSY);

	mmap = malloc(sizeof(*mmap), M_DEVBUF, M_ZERO | M_WAITOK);
	if ((mmap->kvaddr = kva_alloc(size)) == 0) {
		spigen_mmap_cleanup(mmap);
		return (ENOMEM);
	}
	mmap->bufsize = size;
	mmap->bufobj = vm_pager_allocate(OBJT_PHYS, 0, size, nprot, 0,
	    curthread->td_ucred);

	m = malloc(sizeof(*m) * pages, M_TEMP, M_WAITOK);
	VM_OBJECT_WLOCK(mmap->bufobj);
	vm_object_reference_locked(mmap->bufobj); /* kernel and userland both */
	for (n = 0; n < pages; n++) {
		m[n] = vm_page_grab(mmap->bufobj, n,
		    VM_ALLOC_ZERO | VM_ALLOC_WIRED);
		vm_page_valid(m[n]);
		vm_page_xunbusy(m[n]);
	}
	VM_OBJECT_WUNLOCK(mmap->bufobj);
	pmap_qenter(mmap->kvaddr, m, pages);
	free(m, M_TEMP);

	if ((error = devfs_set_cdevpriv(mmap, spigen_mmap_cleanup)) != 0) {
		/* Two threads were racing through this code; we lost. */
		spigen_mmap_cleanup(mmap);
		return (error);
	}
	*offset = 0;
	*object = mmap->bufobj;

	return (0);
}
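
/*
 * Illustrative userland sketch (not part of the driver) of the zero-copy
 * path above: the process maps a buffer from the device (one mapping per
 * open file; a second mmap() returns EBUSY), places the command bytes at
 * offset 0 with the data area immediately after, and passes only the two
 * lengths through SPIGENIOC_TRANSFER_MMAPPED.  Sizes and offsets here are
 * assumptions for the example.
 *
 *	size_t len = getpagesize();
 *	uint8_t *buf = mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_SHARED,
 *	    fd, 0);
 *	buf[0] = 0x9f;				// command at offset 0
 *	struct spigen_transfer_mmapped stm = {
 *		.stm_command_length = 1,
 *		.stm_data_length = 3,		// data area follows the command
 *	};
 *	ioctl(fd, SPIGENIOC_TRANSFER_MMAPPED, &stm);
 *	// reply bytes are now in buf[1] through buf[3]
 */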

static int
spigen_close(struct cdev *cdev, int fflag, int devtype, struct thread *td)
{
	device_t dev = cdev->si_drv1;
	struct spigen_softc *sc = device_get_softc(dev);

	mtx_lock(&sc->sc_mtx);
	device_unbusy(sc->sc_dev);
	mtx_unlock(&sc->sc_mtx);
	return (0);
}

static int
spigen_detach(device_t dev)
{
	struct spigen_softc *sc;

	sc = device_get_softc(dev);

#ifdef SPIGEN_LEGACY_CDEVNAME
	if (sc->sc_adev)
		destroy_dev(sc->sc_adev);
#endif

	if (sc->sc_cdev)
		destroy_dev(sc->sc_cdev);

	mtx_destroy(&sc->sc_mtx);

	return (0);
}

static device_method_t spigen_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		spigen_probe),
	DEVMETHOD(device_attach,	spigen_attach),
	DEVMETHOD(device_detach,	spigen_detach),
	{ 0, 0 }
};

static driver_t spigen_driver = {
	"spigen",
	spigen_methods,
	sizeof(struct spigen_softc),
};

DRIVER_MODULE(spigen, spibus, spigen_driver, 0, 0);
MODULE_DEPEND(spigen, spibus, 1, 1, 1);
#ifdef FDT
SIMPLEBUS_PNP_INFO(compat_data);
#endif
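
/*
 * Illustrative only (not part of the driver): a minimal FDT fragment that
 * would match the compat_data table above, assuming the parent controller
 * follows the usual spibus bindings ("reg" selects the chip select,
 * "spi-max-frequency" the clock).  Node names and values are examples.
 *
 *	&spi0 {
 *		spigen0: spigen@0 {
 *			compatible = "freebsd,spigen";
 *			reg = <0>;
 *			spi-max-frequency = <500000>;
 *		};
 *	};
 */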